/* linux/drivers/scsi/bfa/bfa_svc.c */
   1/*
   2 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
   3 * Copyright (c) 2014- QLogic Corporation.
   4 * All rights reserved
   5 * www.qlogic.com
   6 *
   7 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
   8 *
   9 * This program is free software; you can redistribute it and/or modify it
  10 * under the terms of the GNU General Public License (GPL) Version 2 as
  11 * published by the Free Software Foundation
  12 *
  13 * This program is distributed in the hope that it will be useful, but
  14 * WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16 * General Public License for more details.
  17 */
  18
  19#include "bfad_drv.h"
  20#include "bfad_im.h"
  21#include "bfa_plog.h"
  22#include "bfa_cs.h"
  23#include "bfa_modules.h"
  24
  25BFA_TRC_FILE(HAL, FCXP);
  26BFA_MODULE(fcdiag);
  27BFA_MODULE(fcxp);
  28BFA_MODULE(sgpg);
  29BFA_MODULE(lps);
  30BFA_MODULE(fcport);
  31BFA_MODULE(rport);
  32BFA_MODULE(uf);
  33
  34/*
  35 * LPS related definitions
  36 */
  37#define BFA_LPS_MIN_LPORTS      (1)
  38#define BFA_LPS_MAX_LPORTS      (256)
  39
  40/*
  41 * Maximum Vports supported per physical port or vf.
  42 */
  43#define BFA_LPS_MAX_VPORTS_SUPP_CB  255
  44#define BFA_LPS_MAX_VPORTS_SUPP_CT  190
  45
  46
  47/*
  48 * FC PORT related definitions
  49 */
  50/*
  51 * The port is considered disabled if corresponding physical port or IOC are
  52 * disabled explicitly
  53 */
  54#define BFA_PORT_IS_DISABLED(bfa) \
  55        ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
  56        (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
  57
  58/*
  59 * BFA port state machine events
  60 */
  61enum bfa_fcport_sm_event {
  62        BFA_FCPORT_SM_START     = 1,    /*  start port state machine    */
  63        BFA_FCPORT_SM_STOP      = 2,    /*  stop port state machine     */
  64        BFA_FCPORT_SM_ENABLE    = 3,    /*  enable port         */
  65        BFA_FCPORT_SM_DISABLE   = 4,    /*  disable port state machine */
  66        BFA_FCPORT_SM_FWRSP     = 5,    /*  firmware enable/disable rsp */
  67        BFA_FCPORT_SM_LINKUP    = 6,    /*  firmware linkup event       */
   68        BFA_FCPORT_SM_LINKDOWN  = 7,    /*  firmware linkdown event     */
  69        BFA_FCPORT_SM_QRESUME   = 8,    /*  CQ space available  */
  70        BFA_FCPORT_SM_HWFAIL    = 9,    /*  IOC h/w failure             */
  71        BFA_FCPORT_SM_DPORTENABLE = 10, /*  enable dport      */
  72        BFA_FCPORT_SM_DPORTDISABLE = 11,/*  disable dport     */
   73        BFA_FCPORT_SM_FAA_MISCONFIG = 12,       /* FAA misconfiguration */
  74        BFA_FCPORT_SM_DDPORTENABLE  = 13,       /* enable ddport        */
  75        BFA_FCPORT_SM_DDPORTDISABLE = 14,       /* disable ddport       */
  76};
  77
  78/*
  79 * BFA port link notification state machine events
  80 */
  81
  82enum bfa_fcport_ln_sm_event {
  83        BFA_FCPORT_LN_SM_LINKUP         = 1,    /*  linkup event        */
  84        BFA_FCPORT_LN_SM_LINKDOWN       = 2,    /*  linkdown event      */
  85        BFA_FCPORT_LN_SM_NOTIFICATION   = 3     /*  done notification   */
  86};
  87
  88/*
  89 * RPORT related definitions
  90 */
/*
 * Notify rport offline: when bfa->fcs is set, invoke
 * bfa_cb_rport_offline() synchronously; otherwise defer it by queuing
 * __bfa_cb_rport_offline on the hal callback queue.
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));      \
	}								\
} while (0)

/*
 * Notify rport online: same synchronous-vs-queued dispatch rule as
 * bfa_rport_offline_cb, using the online callbacks.
 */
#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				  __bfa_cb_rport_online, (__rp));      \
		}							\
} while (0)
 108
 109/*
 110 * forward declarations FCXP related functions
 111 */
 112static void     __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
 113static void     hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
 114                                struct bfi_fcxp_send_rsp_s *fcxp_rsp);
 115static void     hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
 116                                struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
 117static void     bfa_fcxp_qresume(void *cbarg);
 118static void     bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
 119                                struct bfi_fcxp_send_req_s *send_req);
 120
 121/*
 122 * forward declarations for LPS functions
 123 */
 124static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
 125                struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
 126static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
 127                                struct bfa_iocfc_cfg_s *cfg,
 128                                struct bfa_pcidev_s *pcidev);
 129static void bfa_lps_detach(struct bfa_s *bfa);
 130static void bfa_lps_start(struct bfa_s *bfa);
 131static void bfa_lps_stop(struct bfa_s *bfa);
 132static void bfa_lps_iocdisable(struct bfa_s *bfa);
 133static void bfa_lps_login_rsp(struct bfa_s *bfa,
 134                                struct bfi_lps_login_rsp_s *rsp);
 135static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
 136static void bfa_lps_logout_rsp(struct bfa_s *bfa,
 137                                struct bfi_lps_logout_rsp_s *rsp);
 138static void bfa_lps_reqq_resume(void *lps_arg);
 139static void bfa_lps_free(struct bfa_lps_s *lps);
 140static void bfa_lps_send_login(struct bfa_lps_s *lps);
 141static void bfa_lps_send_logout(struct bfa_lps_s *lps);
 142static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
 143static void bfa_lps_login_comp(struct bfa_lps_s *lps);
 144static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
 145static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
 146
 147/*
 148 * forward declaration for LPS state machine
 149 */
 150static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
 151static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
 152static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
 153                                        event);
 154static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
 155static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
 156                                        enum bfa_lps_event event);
 157static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
 158static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
 159                                        event);
 160
 161/*
 162 * forward declaration for FC Port functions
 163 */
 164static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
 165static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
 166static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
 167static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
 168static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
 169static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
 170static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
 171                        enum bfa_port_linkstate event, bfa_boolean_t trunk);
 172static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
 173                                enum bfa_port_linkstate event);
 174static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
 175static void bfa_fcport_stats_get_timeout(void *cbarg);
 176static void bfa_fcport_stats_clr_timeout(void *cbarg);
 177static void bfa_trunk_iocdisable(struct bfa_s *bfa);
 178
 179/*
 180 * forward declaration for FC PORT state machine
 181 */
 182static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
 183                                        enum bfa_fcport_sm_event event);
 184static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
 185                                        enum bfa_fcport_sm_event event);
 186static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
 187                                        enum bfa_fcport_sm_event event);
 188static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
 189                                        enum bfa_fcport_sm_event event);
 190static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
 191                                        enum bfa_fcport_sm_event event);
 192static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
 193                                        enum bfa_fcport_sm_event event);
 194static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
 195                                        enum bfa_fcport_sm_event event);
 196static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
 197                                        enum bfa_fcport_sm_event event);
 198static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
 199                                        enum bfa_fcport_sm_event event);
 200static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
 201                                        enum bfa_fcport_sm_event event);
 202static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
 203                                        enum bfa_fcport_sm_event event);
 204static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
 205                                        enum bfa_fcport_sm_event event);
 206static void     bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
 207                                        enum bfa_fcport_sm_event event);
 208static void     bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
 209                                        enum bfa_fcport_sm_event event);
 210static void     bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
 211                                        enum bfa_fcport_sm_event event);
 212
 213static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
 214                                        enum bfa_fcport_ln_sm_event event);
 215static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
 216                                        enum bfa_fcport_ln_sm_event event);
 217static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
 218                                        enum bfa_fcport_ln_sm_event event);
 219static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
 220                                        enum bfa_fcport_ln_sm_event event);
 221static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
 222                                        enum bfa_fcport_ln_sm_event event);
 223static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
 224                                        enum bfa_fcport_ln_sm_event event);
 225static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
 226                                        enum bfa_fcport_ln_sm_event event);
 227
/*
 * Lookup table pairing each fcport state-machine handler with its
 * externally visible bfa_port_state value.  Note that both the iocdown
 * and iocfail handlers map to BFA_PORT_ST_IOCDOWN.
 */
static struct bfa_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
	{BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT},
	{BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
};
 245
 246
 247/*
 248 * forward declaration for RPORT related functions
 249 */
 250static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
 251static void             bfa_rport_free(struct bfa_rport_s *rport);
 252static bfa_boolean_t    bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
 253static bfa_boolean_t    bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
 254static bfa_boolean_t    bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
 255static void             __bfa_cb_rport_online(void *cbarg,
 256                                                bfa_boolean_t complete);
 257static void             __bfa_cb_rport_offline(void *cbarg,
 258                                                bfa_boolean_t complete);
 259
 260/*
 261 * forward declaration for RPORT state machine
 262 */
 263static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
 264                                        enum bfa_rport_event event);
 265static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
 266                                        enum bfa_rport_event event);
 267static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
 268                                        enum bfa_rport_event event);
 269static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
 270                                        enum bfa_rport_event event);
 271static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
 272                                        enum bfa_rport_event event);
 273static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
 274                                        enum bfa_rport_event event);
 275static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
 276                                        enum bfa_rport_event event);
 277static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
 278                                        enum bfa_rport_event event);
 279static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
 280                                        enum bfa_rport_event event);
 281static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
 282                                        enum bfa_rport_event event);
 283static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
 284                                        enum bfa_rport_event event);
 285static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
 286                                        enum bfa_rport_event event);
 287static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
 288                                        enum bfa_rport_event event);
 289
 290/*
 291 * PLOG related definitions
 292 */
 293static int
 294plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
 295{
 296        if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
 297                (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
 298                return 1;
 299
 300        if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
 301                (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
 302                return 1;
 303
 304        return 0;
 305}
 306
 307static u64
 308bfa_get_log_time(void)
 309{
 310        u64 system_time = 0;
 311        struct timeval tv;
 312        do_gettimeofday(&tv);
 313
 314        /* We are interested in seconds only. */
 315        system_time = tv.tv_sec;
 316        return system_time;
 317}
 318
 319static void
 320bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
 321{
 322        u16 tail;
 323        struct bfa_plog_rec_s *pl_recp;
 324
 325        if (plog->plog_enabled == 0)
 326                return;
 327
 328        if (plkd_validate_logrec(pl_rec)) {
 329                WARN_ON(1);
 330                return;
 331        }
 332
 333        tail = plog->tail;
 334
 335        pl_recp = &(plog->plog_recs[tail]);
 336
 337        memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
 338
 339        pl_recp->tv = bfa_get_log_time();
 340        BFA_PL_LOG_REC_INCR(plog->tail);
 341
 342        if (plog->head == plog->tail)
 343                BFA_PL_LOG_REC_INCR(plog->head);
 344}
 345
 346void
 347bfa_plog_init(struct bfa_plog_s *plog)
 348{
 349        memset((char *)plog, 0, sizeof(struct bfa_plog_s));
 350
 351        memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
 352        plog->head = plog->tail = 0;
 353        plog->plog_enabled = 1;
 354}
 355
 356void
 357bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 358                enum bfa_plog_eid event,
 359                u16 misc, char *log_str)
 360{
 361        struct bfa_plog_rec_s  lp;
 362
 363        if (plog->plog_enabled) {
 364                memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 365                lp.mid = mid;
 366                lp.eid = event;
 367                lp.log_type = BFA_PL_LOG_TYPE_STRING;
 368                lp.misc = misc;
 369                strncpy(lp.log_entry.string_log, log_str,
 370                        BFA_PL_STRING_LOG_SZ - 1);
 371                lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
 372                bfa_plog_add(plog, &lp);
 373        }
 374}
 375
 376void
 377bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 378                enum bfa_plog_eid event,
 379                u16 misc, u32 *intarr, u32 num_ints)
 380{
 381        struct bfa_plog_rec_s  lp;
 382        u32 i;
 383
 384        if (num_ints > BFA_PL_INT_LOG_SZ)
 385                num_ints = BFA_PL_INT_LOG_SZ;
 386
 387        if (plog->plog_enabled) {
 388                memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 389                lp.mid = mid;
 390                lp.eid = event;
 391                lp.log_type = BFA_PL_LOG_TYPE_INT;
 392                lp.misc = misc;
 393
 394                for (i = 0; i < num_ints; i++)
 395                        lp.log_entry.int_log[i] = intarr[i];
 396
 397                lp.log_num_ints = (u8) num_ints;
 398
 399                bfa_plog_add(plog, &lp);
 400        }
 401}
 402
 403void
 404bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 405                        enum bfa_plog_eid event,
 406                        u16 misc, struct fchs_s *fchdr)
 407{
 408        struct bfa_plog_rec_s  lp;
 409        u32     *tmp_int = (u32 *) fchdr;
 410        u32     ints[BFA_PL_INT_LOG_SZ];
 411
 412        if (plog->plog_enabled) {
 413                memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 414
 415                ints[0] = tmp_int[0];
 416                ints[1] = tmp_int[1];
 417                ints[2] = tmp_int[4];
 418
 419                bfa_plog_intarr(plog, mid, event, misc, ints, 3);
 420        }
 421}
 422
 423void
 424bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 425                      enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
 426                      u32 pld_w0)
 427{
 428        struct bfa_plog_rec_s  lp;
 429        u32     *tmp_int = (u32 *) fchdr;
 430        u32     ints[BFA_PL_INT_LOG_SZ];
 431
 432        if (plog->plog_enabled) {
 433                memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 434
 435                ints[0] = tmp_int[0];
 436                ints[1] = tmp_int[1];
 437                ints[2] = tmp_int[4];
 438                ints[3] = pld_w0;
 439
 440                bfa_plog_intarr(plog, mid, event, misc, ints, 4);
 441        }
 442}
 443
 444
 445/*
 446 *  fcxp_pvt BFA FCXP private functions
 447 */
 448
 449static void
 450claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
 451{
 452        u16     i;
 453        struct bfa_fcxp_s *fcxp;
 454
 455        fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
 456        memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
 457
 458        INIT_LIST_HEAD(&mod->fcxp_req_free_q);
 459        INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
 460        INIT_LIST_HEAD(&mod->fcxp_active_q);
 461        INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
 462        INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
 463
 464        mod->fcxp_list = fcxp;
 465
 466        for (i = 0; i < mod->num_fcxps; i++) {
 467                fcxp->fcxp_mod = mod;
 468                fcxp->fcxp_tag = i;
 469
 470                if (i < (mod->num_fcxps / 2)) {
 471                        list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
 472                        fcxp->req_rsp = BFA_TRUE;
 473                } else {
 474                        list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
 475                        fcxp->req_rsp = BFA_FALSE;
 476                }
 477
 478                bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
 479                fcxp->reqq_waiting = BFA_FALSE;
 480
 481                fcxp = fcxp + 1;
 482        }
 483
 484        bfa_mem_kva_curp(mod) = (void *)fcxp;
 485}
 486
/*
 * Compute the DMA and KVA memory requirements for the FCXP module and
 * record them in *minfo.
 */
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_fcxp;
	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
	u32	per_fcxp_sz;

	if (num_fcxps == 0)
		return;

	/*
	 * Per-FCXP DMA buffer size: two IBUF-sized buffers in min-cfg
	 * mode, otherwise one IBUF plus one large LBUF.
	 */
	if (cfg->drvcfg.min_cfg)
		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
	else
		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);

	/* Distribute the FCXP buffers across the DMA segments. */
	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
		if (num_fcxps >= per_seg_fcxp) {
			num_fcxps -= per_seg_fcxp;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_fcxp * per_fcxp_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_fcxps * per_fcxp_sz);
	}

	/* kva memory: one bfa_fcxp_s per configured request. */
	bfa_mem_kva_setup(minfo, fcxp_kva,
		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}
 524
 525static void
 526bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 527                struct bfa_pcidev_s *pcidev)
 528{
 529        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
 530
 531        mod->bfa = bfa;
 532        mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
 533
 534        /*
 535         * Initialize FCXP request and response payload sizes.
 536         */
 537        mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
 538        if (!cfg->drvcfg.min_cfg)
 539                mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
 540
 541        INIT_LIST_HEAD(&mod->req_wait_q);
 542        INIT_LIST_HEAD(&mod->rsp_wait_q);
 543
 544        claim_fcxps_mem(mod);
 545}
 546
/*
 * FCXP module detach hook - nothing to tear down.
 */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
 551
/*
 * FCXP module start hook - no-op.
 */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
 556
/*
 * FCXP module stop hook - no-op.
 */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
 561
 562static void
 563bfa_fcxp_iocdisable(struct bfa_s *bfa)
 564{
 565        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
 566        struct bfa_fcxp_s *fcxp;
 567        struct list_head              *qe, *qen;
 568
 569        /* Enqueue unused fcxp resources to free_q */
 570        list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
 571        list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);
 572
 573        list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
 574                fcxp = (struct bfa_fcxp_s *) qe;
 575                if (fcxp->caller == NULL) {
 576                        fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
 577                                        BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
 578                        bfa_fcxp_free(fcxp);
 579                } else {
 580                        fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
 581                        bfa_cb_queue(bfa, &fcxp->hcb_qe,
 582                                     __bfa_fcxp_send_cbfn, fcxp);
 583                }
 584        }
 585}
 586
 587static struct bfa_fcxp_s *
 588bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
 589{
 590        struct bfa_fcxp_s *fcxp;
 591
 592        if (req)
 593                bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
 594        else
 595                bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
 596
 597        if (fcxp)
 598                list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
 599
 600        return fcxp;
 601}
 602
 603static void
 604bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
 605               struct bfa_s *bfa,
 606               u8 *use_ibuf,
 607               u32 *nr_sgles,
 608               bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
 609               bfa_fcxp_get_sglen_t *r_sglen_cbfn,
 610               struct list_head *r_sgpg_q,
 611               int n_sgles,
 612               bfa_fcxp_get_sgaddr_t sga_cbfn,
 613               bfa_fcxp_get_sglen_t sglen_cbfn)
 614{
 615
 616        WARN_ON(bfa == NULL);
 617
 618        bfa_trc(bfa, fcxp->fcxp_tag);
 619
 620        if (n_sgles == 0) {
 621                *use_ibuf = 1;
 622        } else {
 623                WARN_ON(*sga_cbfn == NULL);
 624                WARN_ON(*sglen_cbfn == NULL);
 625
 626                *use_ibuf = 0;
 627                *r_sga_cbfn = sga_cbfn;
 628                *r_sglen_cbfn = sglen_cbfn;
 629
 630                *nr_sgles = n_sgles;
 631
 632                /*
 633                 * alloc required sgpgs
 634                 */
 635                if (n_sgles > BFI_SGE_INLINE)
 636                        WARN_ON(1);
 637        }
 638
 639}
 640
/*
 * Initialize a freshly allocated fcxp: record the caller cookie and
 * set up the scatter/gather bookkeeping for both the request and the
 * response direction via bfa_fcxp_init_reqrsp().
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	       void *caller, struct bfa_s *bfa, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	/* Request-direction scatter/gather setup. */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	/* Response-direction scatter/gather setup. */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}
 667
 668static void
 669bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
 670{
 671        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
 672        struct bfa_fcxp_wqe_s *wqe;
 673
 674        if (fcxp->req_rsp)
 675                bfa_q_deq(&mod->req_wait_q, &wqe);
 676        else
 677                bfa_q_deq(&mod->rsp_wait_q, &wqe);
 678
 679        if (wqe) {
 680                bfa_trc(mod->bfa, fcxp->fcxp_tag);
 681
 682                bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
 683                        wqe->nrsp_sgles, wqe->req_sga_cbfn,
 684                        wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
 685                        wqe->rsp_sglen_cbfn);
 686
 687                wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
 688                return;
 689        }
 690
 691        WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
 692        list_del(&fcxp->qe);
 693
 694        if (fcxp->req_rsp)
 695                list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
 696        else
 697                list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
 698}
 699
/*
 * No-op completion callback installed for fcxps whose real completion
 * has been discarded (e.g. caller abandoned the exchange).
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
 707
 708static void
 709__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
 710{
 711        struct bfa_fcxp_s *fcxp = cbarg;
 712
 713        if (complete) {
 714                fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
 715                                fcxp->rsp_status, fcxp->rsp_len,
 716                                fcxp->residue_len, &fcxp->rsp_fchs);
 717        } else {
 718                bfa_fcxp_free(fcxp);
 719        }
 720}
 721
/*
 * Firmware send-response handler: byte-swap the response fields, look
 * up the owning fcxp by tag and complete it - synchronously when there
 * is no caller context, otherwise through the callback queue.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s	*fcxp;
	u16		fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	/* Convert firmware (big-endian) fields to host order in place. */
	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 *       is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	WARN_ON(fcxp->send_cbfn == NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			/* No caller context: complete synchronously. */
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* Save the response and defer via the cb queue. */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
 771
 772static void
 773hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
 774                 struct fchs_s *fchs)
 775{
 776        /*
 777         * TODO: TX ox_id
 778         */
 779        if (reqlen > 0) {
 780                if (fcxp->use_ireqbuf) {
 781                        u32     pld_w0 =
 782                                *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
 783
 784                        bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
 785                                        BFA_PL_EID_TX,
 786                                        reqlen + sizeof(struct fchs_s), fchs,
 787                                        pld_w0);
 788                } else {
 789                        bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
 790                                        BFA_PL_EID_TX,
 791                                        reqlen + sizeof(struct fchs_s),
 792                                        fchs);
 793                }
 794        } else {
 795                bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
 796                               reqlen + sizeof(struct fchs_s), fchs);
 797        }
 798}
 799
 800static void
 801hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
 802                 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
 803{
 804        if (fcxp_rsp->rsp_len > 0) {
 805                if (fcxp->use_irspbuf) {
 806                        u32     pld_w0 =
 807                                *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
 808
 809                        bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
 810                                              BFA_PL_EID_RX,
 811                                              (u16) fcxp_rsp->rsp_len,
 812                                              &fcxp_rsp->fchs, pld_w0);
 813                } else {
 814                        bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
 815                                       BFA_PL_EID_RX,
 816                                       (u16) fcxp_rsp->rsp_len,
 817                                       &fcxp_rsp->fchs);
 818                }
 819        } else {
 820                bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
 821                               (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
 822        }
 823}
 824
 825/*
 826 * Handler to resume sending fcxp when space in available in cpe queue.
 827 */
 828static void
 829bfa_fcxp_qresume(void *cbarg)
 830{
 831        struct bfa_fcxp_s               *fcxp = cbarg;
 832        struct bfa_s                    *bfa = fcxp->fcxp_mod->bfa;
 833        struct bfi_fcxp_send_req_s      *send_req;
 834
 835        fcxp->reqq_waiting = BFA_FALSE;
 836        send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
 837        bfa_fcxp_queue(fcxp, send_req);
 838}
 839
 840/*
 841 * Queue fcxp send request to foimrware.
 842 */
 843static void
 844bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
 845{
 846        struct bfa_s                    *bfa = fcxp->fcxp_mod->bfa;
 847        struct bfa_fcxp_req_info_s      *reqi = &fcxp->req_info;
 848        struct bfa_fcxp_rsp_info_s      *rspi = &fcxp->rsp_info;
 849        struct bfa_rport_s              *rport = reqi->bfa_rport;
 850
 851        bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
 852                    bfa_fn_lpu(bfa));
 853
 854        send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
 855        if (rport) {
 856                send_req->rport_fw_hndl = rport->fw_handle;
 857                send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
 858                if (send_req->max_frmsz == 0)
 859                        send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
 860        } else {
 861                send_req->rport_fw_hndl = 0;
 862                send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
 863        }
 864
 865        send_req->vf_id = cpu_to_be16(reqi->vf_id);
 866        send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
 867        send_req->class = reqi->class;
 868        send_req->rsp_timeout = rspi->rsp_timeout;
 869        send_req->cts = reqi->cts;
 870        send_req->fchs = reqi->fchs;
 871
 872        send_req->req_len = cpu_to_be32(reqi->req_tot_len);
 873        send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
 874
 875        /*
 876         * setup req sgles
 877         */
 878        if (fcxp->use_ireqbuf == 1) {
 879                bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
 880                                        BFA_FCXP_REQ_PLD_PA(fcxp));
 881        } else {
 882                if (fcxp->nreq_sgles > 0) {
 883                        WARN_ON(fcxp->nreq_sgles != 1);
 884                        bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
 885                                fcxp->req_sga_cbfn(fcxp->caller, 0));
 886                } else {
 887                        WARN_ON(reqi->req_tot_len != 0);
 888                        bfa_alen_set(&send_req->rsp_alen, 0, 0);
 889                }
 890        }
 891
 892        /*
 893         * setup rsp sgles
 894         */
 895        if (fcxp->use_irspbuf == 1) {
 896                WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
 897
 898                bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
 899                                        BFA_FCXP_RSP_PLD_PA(fcxp));
 900        } else {
 901                if (fcxp->nrsp_sgles > 0) {
 902                        WARN_ON(fcxp->nrsp_sgles != 1);
 903                        bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
 904                                fcxp->rsp_sga_cbfn(fcxp->caller, 0));
 905
 906                } else {
 907                        WARN_ON(rspi->rsp_maxlen != 0);
 908                        bfa_alen_set(&send_req->rsp_alen, 0, 0);
 909                }
 910        }
 911
 912        hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
 913
 914        bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
 915
 916        bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
 917        bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
 918}
 919
 920/*
 921 * Allocate an FCXP instance to send a response or to send a request
 922 * that has a response. Request/response buffers are allocated by caller.
 923 *
 924 * @param[in]   bfa             BFA bfa instance
 925 * @param[in]   nreq_sgles      Number of SG elements required for request
 926 *                              buffer. 0, if fcxp internal buffers are used.
 927 *                              Use bfa_fcxp_get_reqbuf() to get the
 928 *                              internal req buffer.
 929 * @param[in]   req_sgles       SG elements describing request buffer. Will be
 930 *                              copied in by BFA and hence can be freed on
 931 *                              return from this function.
 932 * @param[in]   get_req_sga     function ptr to be called to get a request SG
 933 *                              Address (given the sge index).
 934 * @param[in]   get_req_sglen   function ptr to be called to get a request SG
 935 *                              len (given the sge index).
 936 * @param[in]   get_rsp_sga     function ptr to be called to get a response SG
 937 *                              Address (given the sge index).
 938 * @param[in]   get_rsp_sglen   function ptr to be called to get a response SG
 939 *                              len (given the sge index).
 940 * @param[in]   req             Allocated FCXP is used to send req or rsp?
 941 *                              request - BFA_TRUE, response - BFA_FALSE
 942 *
 943 * @return FCXP instance. NULL on failure.
 944 */
 945struct bfa_fcxp_s *
 946bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
 947                int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
 948                bfa_fcxp_get_sglen_t req_sglen_cbfn,
 949                bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
 950                bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
 951{
 952        struct bfa_fcxp_s *fcxp = NULL;
 953
 954        WARN_ON(bfa == NULL);
 955
 956        fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
 957        if (fcxp == NULL)
 958                return NULL;
 959
 960        bfa_trc(bfa, fcxp->fcxp_tag);
 961
 962        bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
 963                        req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
 964
 965        return fcxp;
 966}
 967
 968/*
 969 * Get the internal request buffer pointer
 970 *
 971 * @param[in]   fcxp    BFA fcxp pointer
 972 *
 973 * @return              pointer to the internal request buffer
 974 */
 975void *
 976bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
 977{
 978        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
 979        void    *reqbuf;
 980
 981        WARN_ON(fcxp->use_ireqbuf != 1);
 982        reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
 983                                mod->req_pld_sz + mod->rsp_pld_sz);
 984        return reqbuf;
 985}
 986
 987u32
 988bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
 989{
 990        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
 991
 992        return mod->req_pld_sz;
 993}
 994
 995/*
 996 * Get the internal response buffer pointer
 997 *
 998 * @param[in]   fcxp    BFA fcxp pointer
 999 *
1000 * @return              pointer to the internal request buffer
1001 */
1002void *
1003bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1004{
1005        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1006        void    *fcxp_buf;
1007
1008        WARN_ON(fcxp->use_irspbuf != 1);
1009
1010        fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
1011                                mod->req_pld_sz + mod->rsp_pld_sz);
1012
1013        /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
1014        return ((u8 *) fcxp_buf) + mod->req_pld_sz;
1015}
1016
1017/*
1018 * Free the BFA FCXP
1019 *
1020 * @param[in]   fcxp                    BFA fcxp pointer
1021 *
1022 * @return              void
1023 */
1024void
1025bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1026{
1027        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1028
1029        WARN_ON(fcxp == NULL);
1030        bfa_trc(mod->bfa, fcxp->fcxp_tag);
1031        bfa_fcxp_put(fcxp);
1032}
1033
1034/*
1035 * Send a FCXP request
1036 *
1037 * @param[in]   fcxp    BFA fcxp pointer
1038 * @param[in]   rport   BFA rport pointer. Could be left NULL for WKA rports
1039 * @param[in]   vf_id   virtual Fabric ID
1040 * @param[in]   lp_tag  lport tag
1041 * @param[in]   cts     use Continuous sequence
1042 * @param[in]   cos     fc Class of Service
1043 * @param[in]   reqlen  request length, does not include FCHS length
1044 * @param[in]   fchs    fc Header Pointer. The header content will be copied
1045 *                      in by BFA.
1046 *
1047 * @param[in]   cbfn    call back function to be called on receiving
1048 *                                                              the response
1049 * @param[in]   cbarg   arg for cbfn
1050 * @param[in]   rsp_timeout
1051 *                      response timeout
1052 *
 * @return              void
1054 */
1055void
1056bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1057              u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1058              u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1059              void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1060{
1061        struct bfa_s                    *bfa  = fcxp->fcxp_mod->bfa;
1062        struct bfa_fcxp_req_info_s      *reqi = &fcxp->req_info;
1063        struct bfa_fcxp_rsp_info_s      *rspi = &fcxp->rsp_info;
1064        struct bfi_fcxp_send_req_s      *send_req;
1065
1066        bfa_trc(bfa, fcxp->fcxp_tag);
1067
1068        /*
1069         * setup request/response info
1070         */
1071        reqi->bfa_rport = rport;
1072        reqi->vf_id = vf_id;
1073        reqi->lp_tag = lp_tag;
1074        reqi->class = cos;
1075        rspi->rsp_timeout = rsp_timeout;
1076        reqi->cts = cts;
1077        reqi->fchs = *fchs;
1078        reqi->req_tot_len = reqlen;
1079        rspi->rsp_maxlen = rsp_maxlen;
1080        fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1081        fcxp->send_cbarg = cbarg;
1082
1083        /*
1084         * If no room in CPE queue, wait for space in request queue
1085         */
1086        send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1087        if (!send_req) {
1088                bfa_trc(bfa, fcxp->fcxp_tag);
1089                fcxp->reqq_waiting = BFA_TRUE;
1090                bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1091                return;
1092        }
1093
1094        bfa_fcxp_queue(fcxp, send_req);
1095}
1096
1097/*
1098 * Abort a BFA FCXP
1099 *
1100 * @param[in]   fcxp    BFA fcxp pointer
1101 *
1102 * @return              void
1103 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	/* Abort is not implemented; warn if anyone actually calls this. */
	WARN_ON(1);
	return BFA_STATUS_OK;
}
1111
1112void
1113bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1114               bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1115               void *caller, int nreq_sgles,
1116               int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1117               bfa_fcxp_get_sglen_t req_sglen_cbfn,
1118               bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1119               bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
1120{
1121        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1122
1123        if (req)
1124                WARN_ON(!list_empty(&mod->fcxp_req_free_q));
1125        else
1126                WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
1127
1128        wqe->alloc_cbfn = alloc_cbfn;
1129        wqe->alloc_cbarg = alloc_cbarg;
1130        wqe->caller = caller;
1131        wqe->bfa = bfa;
1132        wqe->nreq_sgles = nreq_sgles;
1133        wqe->nrsp_sgles = nrsp_sgles;
1134        wqe->req_sga_cbfn = req_sga_cbfn;
1135        wqe->req_sglen_cbfn = req_sglen_cbfn;
1136        wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1137        wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1138
1139        if (req)
1140                list_add_tail(&wqe->qe, &mod->req_wait_q);
1141        else
1142                list_add_tail(&wqe->qe, &mod->rsp_wait_q);
1143}
1144
1145void
1146bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1147{
1148        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1149
1150        WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) ||
1151                !bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
1152        list_del(&wqe->qe);
1153}
1154
1155void
1156bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1157{
1158        /*
1159         * If waiting for room in request queue, cancel reqq wait
1160         * and free fcxp.
1161         */
1162        if (fcxp->reqq_waiting) {
1163                fcxp->reqq_waiting = BFA_FALSE;
1164                bfa_reqq_wcancel(&fcxp->reqq_wqe);
1165                bfa_fcxp_free(fcxp);
1166                return;
1167        }
1168
1169        fcxp->send_cbfn = bfa_fcxp_null_comp;
1170}
1171
1172void
1173bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1174{
1175        switch (msg->mhdr.msg_id) {
1176        case BFI_FCXP_I2H_SEND_RSP:
1177                hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1178                break;
1179
1180        default:
1181                bfa_trc(bfa, msg->mhdr.msg_id);
1182                WARN_ON(1);
1183        }
1184}
1185
1186u32
1187bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1188{
1189        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1190
1191        return mod->rsp_pld_sz;
1192}
1193
1194void
1195bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
1196{
1197        struct bfa_fcxp_mod_s   *mod = BFA_FCXP_MOD(bfa);
1198        struct list_head        *qe;
1199        int     i;
1200
1201        for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
1202                if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
1203                        bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
1204                        list_add_tail(qe, &mod->fcxp_req_unused_q);
1205                } else {
1206                        bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
1207                        list_add_tail(qe, &mod->fcxp_rsp_unused_q);
1208                }
1209        }
1210}
1211
1212/*
1213 *  BFA LPS state machine functions
1214 */
1215
1216/*
1217 * Init state -- no login
1218 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/*
		 * Send FLOGI/FDISC now if the request queue has room,
		 * otherwise wait for space in the loginwait state.
		 */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* Not logged in: complete the logout immediately */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		/* Return the lps to the module free pool */
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		/* Nothing to clear; already logged out */
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. Fw will eventually send out the timeout.
		 * Just ignore.
		 */
		break;
	case BFA_LPS_SM_SET_N2N_PID:
		/*
		 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
		 * this event. Ignore this event.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1273
1274/*
1275 * login is in progress -- awaiting response from firmware
1276 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* lps->status was filled in by bfa_lps_login_rsp() */
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
			/* If N2N, send the assigned PID to FW */
			bfa_trc(lps->bfa, lps->fport);
			bfa_trc(lps->bfa, lps->lp_pid);

			if (!lps->fport && lps->lp_pid)
				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
		} else {
			/* Login failed: drop back to init */
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* Notify the owner of login completion (success or not) */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Login overtaken by offline/delete; reset the state */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* PID will be pushed after the login response; only trace */
		bfa_trc(lps->bfa, lps->fport);
		bfa_trc(lps->bfa, lps->lp_pid);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1327
1328/*
1329 * login pending - awaiting space in request queue
1330 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Request queue has space now; send the deferred login */
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		bfa_lps_send_login(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Cancel the queued wait entry before returning to init */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1361
1362/*
1363 * login complete
1364 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/* Send logout now, or wait for request queue space */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* Push the assigned N2N PID to firmware (or wait for room) */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else
			bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1410
1411/*
1412 * login complete
1413 */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available: send the N2N PID and go online */
		bfa_sm_set_state(lps, bfa_lps_sm_online);
		bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_LOGOUT:
		/*
		 * The reqq wait entry queued on entry to this state stays
		 * queued; its resume callback fires in logowait and sends
		 * the logout instead of the N2N PID.
		 */
		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		/* Cancel the pending wait before notifying the vport */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1452
1453/*
1454 * logout in progress - awaiting firmware response
1455 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
	case BFA_LPS_SM_OFFLINE:
		/* Logout finished (or port went offline); notify owner */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		/* Deleted while logging out; no completion callback */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1477
1478/*
1479 * logout pending -- awaiting space in request queue
1480 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Request queue has space now; send the deferred logout */
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Cancel the queued wait entry before returning to init */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1503
1504
1505
1506/*
1507 *  lps_pvt BFA LPS private functions
1508 */
1509
1510/*
1511 * return memory requirement
1512 */
1513static void
1514bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1515                struct bfa_s *bfa)
1516{
1517        struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1518
1519        if (cfg->drvcfg.min_cfg)
1520                bfa_mem_kva_setup(minfo, lps_kva,
1521                        sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
1522        else
1523                bfa_mem_kva_setup(minfo, lps_kva,
1524                        sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
1525}
1526
1527/*
1528 * bfa module attach at initialization time
1529 */
1530static void
1531bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1532        struct bfa_pcidev_s *pcidev)
1533{
1534        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1535        struct bfa_lps_s        *lps;
1536        int                     i;
1537
1538        mod->num_lps = BFA_LPS_MAX_LPORTS;
1539        if (cfg->drvcfg.min_cfg)
1540                mod->num_lps = BFA_LPS_MIN_LPORTS;
1541        else
1542                mod->num_lps = BFA_LPS_MAX_LPORTS;
1543        mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
1544
1545        bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
1546
1547        INIT_LIST_HEAD(&mod->lps_free_q);
1548        INIT_LIST_HEAD(&mod->lps_active_q);
1549        INIT_LIST_HEAD(&mod->lps_login_q);
1550
1551        for (i = 0; i < mod->num_lps; i++, lps++) {
1552                lps->bfa        = bfa;
1553                lps->bfa_tag    = (u8) i;
1554                lps->reqq       = BFA_REQQ_LPS;
1555                bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1556                list_add_tail(&lps->qe, &mod->lps_free_q);
1557        }
1558}
1559
/* Module detach hook -- LPS has no per-module cleanup; intentionally empty */
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}
1564
/* Module start hook -- no LPS start-time work; intentionally empty */
static void
bfa_lps_start(struct bfa_s *bfa)
{
}
1569
/* Module stop hook -- no LPS stop-time work; intentionally empty */
static void
bfa_lps_stop(struct bfa_s *bfa)
{
}
1574
1575/*
1576 * IOC in disabled state -- consider all lps offline
1577 */
1578static void
1579bfa_lps_iocdisable(struct bfa_s *bfa)
1580{
1581        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1582        struct bfa_lps_s        *lps;
1583        struct list_head                *qe, *qen;
1584
1585        list_for_each_safe(qe, qen, &mod->lps_active_q) {
1586                lps = (struct bfa_lps_s *) qe;
1587                bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1588        }
1589        list_for_each_safe(qe, qen, &mod->lps_login_q) {
1590                lps = (struct bfa_lps_s *) qe;
1591                bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1592        }
1593        list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
1594}
1595
1596/*
1597 * Firmware login response
1598 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	WARN_ON(rsp->bfa_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

	/* Cache status for the state machine and owner to inspect */
	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		/* Save negotiated login parameters from firmware */
		lps->fw_tag	= rsp->fw_tag;
		lps->fport	= rsp->f_port;
		if (lps->fport)
			lps->lp_pid = rsp->lp_pid;
		lps->npiv_en	= rsp->npiv_en;
		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn	= rsp->port_name;
		lps->pr_nwwn	= rsp->node_name;
		lps->auth_req	= rsp->auth_req;
		lps->lp_mac	= rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac	= rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		/* Fabric rejected the login; record reason/explanation */
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	case BFA_STATUS_VPORT_MAX:
		/*
		 * Vport limit hit: fail the trailing pending logins too
		 * (ext_status carries the count -- see bfa_lps_no_res()).
		 */
		if (rsp->ext_status)
			bfa_lps_no_res(lps, rsp->ext_status);
		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	/* Move lps from the login queue back to the active queue */
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_active_q);
	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
1651
1652static void
1653bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
1654{
1655        struct bfa_s            *bfa = first_lps->bfa;
1656        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1657        struct list_head        *qe, *qe_next;
1658        struct bfa_lps_s        *lps;
1659
1660        bfa_trc(bfa, count);
1661
1662        qe = bfa_q_next(first_lps);
1663
1664        while (count && qe) {
1665                qe_next = bfa_q_next(qe);
1666                lps = (struct bfa_lps_s *)qe;
1667                bfa_trc(bfa, lps->bfa_tag);
1668                lps->status = first_lps->status;
1669                list_del(&lps->qe);
1670                list_add_tail(&lps->qe, &mod->lps_active_q);
1671                bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1672                qe = qe_next;
1673                count--;
1674        }
1675}
1676
1677/*
1678 * Firmware logout response
1679 */
1680static void
1681bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1682{
1683        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1684        struct bfa_lps_s        *lps;
1685
1686        WARN_ON(rsp->bfa_tag >= mod->num_lps);
1687        lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1688
1689        bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1690}
1691
1692/*
1693 * Firmware received a Clear virtual link request (for FCoE)
1694 */
1695static void
1696bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1697{
1698        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1699        struct bfa_lps_s        *lps;
1700
1701        lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
1702
1703        bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1704}
1705
1706/*
1707 * Space is available in request queue, resume queueing request to firmware.
1708 */
1709static void
1710bfa_lps_reqq_resume(void *lps_arg)
1711{
1712        struct bfa_lps_s        *lps = lps_arg;
1713
1714        bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1715}
1716
1717/*
1718 * lps is freed -- triggered by vport delete
1719 */
1720static void
1721bfa_lps_free(struct bfa_lps_s *lps)
1722{
1723        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(lps->bfa);
1724
1725        lps->lp_pid = 0;
1726        list_del(&lps->qe);
1727        list_add_tail(&lps->qe, &mod->lps_free_q);
1728}
1729
1730/*
1731 * send login request to firmware
1732 */
1733static void
1734bfa_lps_send_login(struct bfa_lps_s *lps)
1735{
1736        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(lps->bfa);
1737        struct bfi_lps_login_req_s      *m;
1738
1739        m = bfa_reqq_next(lps->bfa, lps->reqq);
1740        WARN_ON(!m);
1741
1742        bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1743                bfa_fn_lpu(lps->bfa));
1744
1745        m->bfa_tag      = lps->bfa_tag;
1746        m->alpa         = lps->alpa;
1747        m->pdu_size     = cpu_to_be16(lps->pdusz);
1748        m->pwwn         = lps->pwwn;
1749        m->nwwn         = lps->nwwn;
1750        m->fdisc        = lps->fdisc;
1751        m->auth_en      = lps->auth_en;
1752
1753        bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1754        list_del(&lps->qe);
1755        list_add_tail(&lps->qe, &mod->lps_login_q);
1756}
1757
1758/*
1759 * send logout request to firmware
1760 */
1761static void
1762bfa_lps_send_logout(struct bfa_lps_s *lps)
1763{
1764        struct bfi_lps_logout_req_s *m;
1765
1766        m = bfa_reqq_next(lps->bfa, lps->reqq);
1767        WARN_ON(!m);
1768
1769        bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1770                bfa_fn_lpu(lps->bfa));
1771
1772        m->fw_tag = lps->fw_tag;
1773        m->port_name = lps->pwwn;
1774        bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1775}
1776
1777/*
1778 * send n2n pid set request to firmware
1779 */
1780static void
1781bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
1782{
1783        struct bfi_lps_n2n_pid_req_s *m;
1784
1785        m = bfa_reqq_next(lps->bfa, lps->reqq);
1786        WARN_ON(!m);
1787
1788        bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
1789                bfa_fn_lpu(lps->bfa));
1790
1791        m->fw_tag = lps->fw_tag;
1792        m->lp_pid = lps->lp_pid;
1793        bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1794}
1795
1796/*
1797 * Indirect login completion handler for non-fcs
1798 */
1799static void
1800bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1801{
1802        struct bfa_lps_s *lps   = arg;
1803
1804        if (!complete)
1805                return;
1806
1807        if (lps->fdisc)
1808                bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1809        else
1810                bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1811}
1812
1813/*
1814 * Login completion handler -- direct call for fcs, queue for others
1815 */
1816static void
1817bfa_lps_login_comp(struct bfa_lps_s *lps)
1818{
1819        if (!lps->bfa->fcs) {
1820                bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1821                        lps);
1822                return;
1823        }
1824
1825        if (lps->fdisc)
1826                bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1827        else
1828                bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1829}
1830
1831/*
1832 * Indirect logout completion handler for non-fcs
1833 */
1834static void
1835bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1836{
1837        struct bfa_lps_s *lps   = arg;
1838
1839        if (!complete)
1840                return;
1841
1842        if (lps->fdisc)
1843                bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1844        else
1845                bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
1846}
1847
1848/*
1849 * Logout completion handler -- direct call for fcs, queue for others
1850 */
1851static void
1852bfa_lps_logout_comp(struct bfa_lps_s *lps)
1853{
1854        if (!lps->bfa->fcs) {
1855                bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1856                        lps);
1857                return;
1858        }
1859        if (lps->fdisc)
1860                bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1861}
1862
1863/*
1864 * Clear virtual link completion handler for non-fcs
1865 */
1866static void
1867bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1868{
1869        struct bfa_lps_s *lps   = arg;
1870
1871        if (!complete)
1872                return;
1873
1874        /* Clear virtual link to base port will result in link down */
1875        if (lps->fdisc)
1876                bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1877}
1878
1879/*
1880 * Received Clear virtual link event --direct call for fcs,
1881 * queue for others
1882 */
1883static void
1884bfa_lps_cvl_event(struct bfa_lps_s *lps)
1885{
1886        if (!lps->bfa->fcs) {
1887                bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1888                        lps);
1889                return;
1890        }
1891
1892        /* Clear virtual link to base port will result in link down */
1893        if (lps->fdisc)
1894                bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1895}
1896
1897
1898
1899/*
1900 *  lps_public BFA LPS public functions
1901 */
1902
1903u32
1904bfa_lps_get_max_vport(struct bfa_s *bfa)
1905{
1906        if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1907                return BFA_LPS_MAX_VPORTS_SUPP_CT;
1908        else
1909                return BFA_LPS_MAX_VPORTS_SUPP_CB;
1910}
1911
1912/*
1913 * Allocate a lport srvice tag.
1914 */
1915struct bfa_lps_s  *
1916bfa_lps_alloc(struct bfa_s *bfa)
1917{
1918        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1919        struct bfa_lps_s        *lps = NULL;
1920
1921        bfa_q_deq(&mod->lps_free_q, &lps);
1922
1923        if (lps == NULL)
1924                return NULL;
1925
1926        list_add_tail(&lps->qe, &mod->lps_active_q);
1927
1928        bfa_sm_set_state(lps, bfa_lps_sm_init);
1929        return lps;
1930}
1931
1932/*
1933 * Free lport service tag. This can be called anytime after an alloc.
1934 * No need to wait for any pending login/logout completions.
1935 */
1936void
1937bfa_lps_delete(struct bfa_lps_s *lps)
1938{
1939        bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1940}
1941
1942/*
1943 * Initiate a lport login.
1944 */
1945void
1946bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1947        wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1948{
1949        lps->uarg       = uarg;
1950        lps->alpa       = alpa;
1951        lps->pdusz      = pdusz;
1952        lps->pwwn       = pwwn;
1953        lps->nwwn       = nwwn;
1954        lps->fdisc      = BFA_FALSE;
1955        lps->auth_en    = auth_en;
1956        bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1957}
1958
1959/*
1960 * Initiate a lport fdisc login.
1961 */
1962void
1963bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1964        wwn_t nwwn)
1965{
1966        lps->uarg       = uarg;
1967        lps->alpa       = 0;
1968        lps->pdusz      = pdusz;
1969        lps->pwwn       = pwwn;
1970        lps->nwwn       = nwwn;
1971        lps->fdisc      = BFA_TRUE;
1972        lps->auth_en    = BFA_FALSE;
1973        bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1974}
1975
1976
1977/*
1978 * Initiate a lport FDSIC logout.
1979 */
1980void
1981bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1982{
1983        bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1984}
1985
/*
 * Return the firmware-assigned tag for the lport with the given bfa tag.
 */
u8
bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);

	return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
}
1993
1994/*
1995 * Return lport services tag given the pid
1996 */
1997u8
1998bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1999{
2000        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
2001        struct bfa_lps_s        *lps;
2002        int                     i;
2003
2004        for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
2005                if (lps->lp_pid == pid)
2006                        return lps->bfa_tag;
2007        }
2008
2009        /* Return base port tag anyway */
2010        return 0;
2011}
2012
2013
2014/*
2015 * return port id assigned to the base lport
2016 */
2017u32
2018bfa_lps_get_base_pid(struct bfa_s *bfa)
2019{
2020        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
2021
2022        return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
2023}
2024
2025/*
2026 * Set PID in case of n2n (which is assigned during PLOGI)
2027 */
2028void
2029bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
2030{
2031        bfa_trc(lps->bfa, lps->bfa_tag);
2032        bfa_trc(lps->bfa, n2n_pid);
2033
2034        lps->lp_pid = n2n_pid;
2035        bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
2036}
2037
2038/*
2039 * LPS firmware message class handler.
2040 */
2041void
2042bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2043{
2044        union bfi_lps_i2h_msg_u msg;
2045
2046        bfa_trc(bfa, m->mhdr.msg_id);
2047        msg.msg = m;
2048
2049        switch (m->mhdr.msg_id) {
2050        case BFI_LPS_I2H_LOGIN_RSP:
2051                bfa_lps_login_rsp(bfa, msg.login_rsp);
2052                break;
2053
2054        case BFI_LPS_I2H_LOGOUT_RSP:
2055                bfa_lps_logout_rsp(bfa, msg.logout_rsp);
2056                break;
2057
2058        case BFI_LPS_I2H_CVL_EVENT:
2059                bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
2060                break;
2061
2062        default:
2063                bfa_trc(bfa, m->mhdr.msg_id);
2064                WARN_ON(1);
2065        }
2066}
2067
/*
 * Post a port-category asynchronous event notification (AEN) for
 * delivery to management applications.
 */
static void
bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	struct bfa_aen_entry_s	*aen_entry;

	/* If no AEN entry is available the event is silently dropped */
	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
	aen_entry->aen_data.port.pwwn = fcport->pwwn;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
				  BFA_AEN_CAT_PORT, event);
}
2085
2086/*
2087 * FC PORT state machine functions
2088 */
/*
 * Port state: uninitialized. Waiting for the START event issued after
 * IOC configuration completes.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		fcport->use_flash_cfg = BFA_TRUE;

		/* If the enable request cannot be queued now, wait for
		 * request queue space in the qwait state. */
		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2136
/*
 * Port state: enable requested, waiting for request queue space to send
 * the enable message to firmware.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available -- send the deferred enable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2200
/*
 * Port state: enable request sent to firmware, awaiting response or a
 * link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		/* Clients must have registered an event callback by now */
		WARN_ON(!fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2262
/*
 * Port state: enabled but link is down; waiting for link up.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		WARN_ON(!fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		/* For FCoE mode, log the FIP FCF discovery outcome */
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);

		/* If QoS is enabled and it is not online, send AEN */
		if (fcport->cfg.qos_enabled &&
		    fcport->qos_attr.state != BFA_QOS_ONLINE)
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2354
/*
 * Port state: enabled and link is up (normal operational state).
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		/* Disabling an online port also takes the link down */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* An unexpected link loss is an error; a link down on a
		 * disabled port/IOC is routine. */
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2453
/*
 * Port state: disable requested, waiting for request queue space to send
 * the disable message to firmware.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available -- send the deferred disable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable arrived before the disable was even sent --
		 * toggle (disable then enable) once queue space resumes */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2504
/*
 * Port state: a disable followed by an enable are both pending, waiting
 * for request queue space to send them to firmware back-to-back.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Send the queued disable, then the enable; if the enable
		 * cannot be queued too, wait for space again. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable already pending -- nothing to do */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Drop the pending enable; back to a plain disable wait */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2551
/*
 * Port state: disable request sent to firmware, awaiting response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2606
/*
 * Port is administratively disabled; waiting for an enable, stop, or
 * diagnostic-port (dport/ddport) request.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
        char pwwn_buf[BFA_STRING_32];
        struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_START:
                /*
                 * Ignore start event for a port that is disabled.
                 */
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_ENABLE:
                /* Queue an enable to firmware (or wait for reqq space). */
                if (bfa_fcport_send_enable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                else
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_enabling_qwait);

                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port enabled: WWN = %s\n", pwwn_buf);
                bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
                break;

        case BFA_FCPORT_SM_DISABLE:
                /*
                 * Already disabled.
                 */
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
                break;

        case BFA_FCPORT_SM_DPORTENABLE:
                /* Enter diagnostic-port mode. */
                bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
                break;

        case BFA_FCPORT_SM_DDPORTENABLE:
                /* Enter ddport mode. */
                bfa_sm_set_state(fcport, bfa_fcport_sm_ddport);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2663
2664static void
2665bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2666                         enum bfa_fcport_sm_event event)
2667{
2668        bfa_trc(fcport->bfa, event);
2669
2670        switch (event) {
2671        case BFA_FCPORT_SM_START:
2672                if (bfa_fcport_send_enable(fcport))
2673                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2674                else
2675                        bfa_sm_set_state(fcport,
2676                                         bfa_fcport_sm_enabling_qwait);
2677                break;
2678
2679        default:
2680                /*
2681                 * Ignore all other events.
2682                 */
2683                ;
2684        }
2685}
2686
2687/*
2688 * Port is enabled. IOC is down/failed.
2689 */
2690static void
2691bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2692                         enum bfa_fcport_sm_event event)
2693{
2694        bfa_trc(fcport->bfa, event);
2695
2696        switch (event) {
2697        case BFA_FCPORT_SM_START:
2698                if (bfa_fcport_send_enable(fcport))
2699                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2700                else
2701                        bfa_sm_set_state(fcport,
2702                                         bfa_fcport_sm_enabling_qwait);
2703                break;
2704
2705        default:
2706                /*
2707                 * Ignore all events.
2708                 */
2709                ;
2710        }
2711}
2712
2713/*
2714 * Port is disabled. IOC is down/failed.
2715 */
2716static void
2717bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2718                         enum bfa_fcport_sm_event event)
2719{
2720        bfa_trc(fcport->bfa, event);
2721
2722        switch (event) {
2723        case BFA_FCPORT_SM_START:
2724                bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2725                break;
2726
2727        case BFA_FCPORT_SM_ENABLE:
2728                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2729                break;
2730
2731        default:
2732                /*
2733                 * Ignore all events.
2734                 */
2735                ;
2736        }
2737}
2738
/*
 * Port is running in diagnostic (D-Port) mode; normal port controls are
 * ignored until diagnostics are disabled.
 */
static void
bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_DPORTENABLE:
        case BFA_FCPORT_SM_DISABLE:
        case BFA_FCPORT_SM_ENABLE:
        case BFA_FCPORT_SM_START:
                /*
                 * Ignore event for a port that is dport
                 */
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
                break;

        case BFA_FCPORT_SM_DPORTDISABLE:
                /* Leaving diagnostic mode returns the port to disabled. */
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2770
/*
 * Port is running in ddport mode; only disable/stop/hwfail transitions
 * are honored.
 */
static void
bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
                        enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_DISABLE:
        case BFA_FCPORT_SM_DDPORTDISABLE:
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
                break;

        case BFA_FCPORT_SM_DPORTENABLE:
        case BFA_FCPORT_SM_DPORTDISABLE:
        case BFA_FCPORT_SM_ENABLE:
        case BFA_FCPORT_SM_START:
                /*
                 * Ignore event for a port that is ddport
                 */
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2804
/*
 * An FAA (Fabric Assigned Address) misconfiguration was detected; only
 * disable, stop, and hardware-failure events are acted upon.
 */
static void
bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
                            enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_DPORTENABLE:
        case BFA_FCPORT_SM_ENABLE:
        case BFA_FCPORT_SM_START:
                /*
                 * Ignore event for a port as there is FAA misconfig
                 */
                break;

        case BFA_FCPORT_SM_DISABLE:
                /* Queue the disable and tear down link state/notification. */
                if (bfa_fcport_send_disable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
                else
                        bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);

                bfa_fcport_reset_linkinfo(fcport);
                bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                             BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
                bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_HWFAIL:
                /* IOC failed: clear link info and notify link down. */
                bfa_fcport_reset_linkinfo(fcport);
                bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2847
2848/*
2849 * Link state is down
2850 */
2851static void
2852bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2853                enum bfa_fcport_ln_sm_event event)
2854{
2855        bfa_trc(ln->fcport->bfa, event);
2856
2857        switch (event) {
2858        case BFA_FCPORT_LN_SM_LINKUP:
2859                bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2860                bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2861                break;
2862
2863        default:
2864                bfa_sm_fault(ln->fcport->bfa, event);
2865        }
2866}
2867
2868/*
2869 * Link state is waiting for down notification
2870 */
2871static void
2872bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2873                enum bfa_fcport_ln_sm_event event)
2874{
2875        bfa_trc(ln->fcport->bfa, event);
2876
2877        switch (event) {
2878        case BFA_FCPORT_LN_SM_LINKUP:
2879                bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2880                break;
2881
2882        case BFA_FCPORT_LN_SM_NOTIFICATION:
2883                bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2884                break;
2885
2886        default:
2887                bfa_sm_fault(ln->fcport->bfa, event);
2888        }
2889}
2890
2891/*
2892 * Link state is waiting for down notification and there is a pending up
2893 */
2894static void
2895bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2896                enum bfa_fcport_ln_sm_event event)
2897{
2898        bfa_trc(ln->fcport->bfa, event);
2899
2900        switch (event) {
2901        case BFA_FCPORT_LN_SM_LINKDOWN:
2902                bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2903                break;
2904
2905        case BFA_FCPORT_LN_SM_NOTIFICATION:
2906                bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2907                bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2908                break;
2909
2910        default:
2911                bfa_sm_fault(ln->fcport->bfa, event);
2912        }
2913}
2914
2915/*
2916 * Link state is up
2917 */
2918static void
2919bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2920                enum bfa_fcport_ln_sm_event event)
2921{
2922        bfa_trc(ln->fcport->bfa, event);
2923
2924        switch (event) {
2925        case BFA_FCPORT_LN_SM_LINKDOWN:
2926                bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2927                bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2928                break;
2929
2930        default:
2931                bfa_sm_fault(ln->fcport->bfa, event);
2932        }
2933}
2934
2935/*
2936 * Link state is waiting for up notification
2937 */
2938static void
2939bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2940                enum bfa_fcport_ln_sm_event event)
2941{
2942        bfa_trc(ln->fcport->bfa, event);
2943
2944        switch (event) {
2945        case BFA_FCPORT_LN_SM_LINKDOWN:
2946                bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2947                break;
2948
2949        case BFA_FCPORT_LN_SM_NOTIFICATION:
2950                bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2951                break;
2952
2953        default:
2954                bfa_sm_fault(ln->fcport->bfa, event);
2955        }
2956}
2957
2958/*
2959 * Link state is waiting for up notification and there is a pending down
2960 */
2961static void
2962bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2963                enum bfa_fcport_ln_sm_event event)
2964{
2965        bfa_trc(ln->fcport->bfa, event);
2966
2967        switch (event) {
2968        case BFA_FCPORT_LN_SM_LINKUP:
2969                bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2970                break;
2971
2972        case BFA_FCPORT_LN_SM_NOTIFICATION:
2973                bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2974                bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2975                break;
2976
2977        default:
2978                bfa_sm_fault(ln->fcport->bfa, event);
2979        }
2980}
2981
2982/*
2983 * Link state is waiting for up notification and there are pending down and up
2984 */
2985static void
2986bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2987                        enum bfa_fcport_ln_sm_event event)
2988{
2989        bfa_trc(ln->fcport->bfa, event);
2990
2991        switch (event) {
2992        case BFA_FCPORT_LN_SM_LINKDOWN:
2993                bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2994                break;
2995
2996        case BFA_FCPORT_LN_SM_NOTIFICATION:
2997                bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2998                bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2999                break;
3000
3001        default:
3002                bfa_sm_fault(ln->fcport->bfa, event);
3003        }
3004}
3005
3006static void
3007__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
3008{
3009        struct bfa_fcport_ln_s *ln = cbarg;
3010
3011        if (complete)
3012                ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
3013        else
3014                bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
3015}
3016
3017/*
3018 * Send SCN notification to upper layers.
3019 * trunk - false if caller is fcport to ignore fcport event in trunked mode
3020 */
3021static void
3022bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
3023        bfa_boolean_t trunk)
3024{
3025        if (fcport->cfg.trunked && !trunk)
3026                return;
3027
3028        switch (event) {
3029        case BFA_PORT_LINKUP:
3030                bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
3031                break;
3032        case BFA_PORT_LINKDOWN:
3033                bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
3034                break;
3035        default:
3036                WARN_ON(1);
3037        }
3038}
3039
3040static void
3041bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
3042{
3043        struct bfa_fcport_s *fcport = ln->fcport;
3044
3045        if (fcport->bfa->fcs) {
3046                fcport->event_cbfn(fcport->event_cbarg, event);
3047                bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
3048        } else {
3049                ln->ln_event = event;
3050                bfa_cb_queue(fcport->bfa, &ln->ln_qe,
3051                        __bfa_cb_fcport_event, ln);
3052        }
3053}
3054
3055#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
3056                                                        BFA_CACHELINE_SZ))
3057
3058static void
3059bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
3060                   struct bfa_s *bfa)
3061{
3062        struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
3063
3064        bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
3065}
3066
3067static void
3068bfa_fcport_qresume(void *cbarg)
3069{
3070        struct bfa_fcport_s *fcport = cbarg;
3071
3072        bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
3073}
3074
3075static void
3076bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
3077{
3078        struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
3079
3080        fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
3081        fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
3082        fcport->stats = (union bfa_fcport_stats_u *)
3083                                bfa_mem_dma_virt(fcport_dma);
3084}
3085
3086/*
3087 * Memory initialization.
3088 */
3089static void
3090bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3091                struct bfa_pcidev_s *pcidev)
3092{
3093        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3094        struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
3095        struct bfa_fcport_ln_s *ln = &fcport->ln;
3096        struct timeval tv;
3097
3098        fcport->bfa = bfa;
3099        ln->fcport = fcport;
3100
3101        bfa_fcport_mem_claim(fcport);
3102
3103        bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
3104        bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
3105
3106        /*
3107         * initialize time stamp for stats reset
3108         */
3109        do_gettimeofday(&tv);
3110        fcport->stats_reset_time = tv.tv_sec;
3111        fcport->stats_dma_ready = BFA_FALSE;
3112
3113        /*
3114         * initialize and set default configuration
3115         */
3116        port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
3117        port_cfg->speed = BFA_PORT_SPEED_AUTO;
3118        port_cfg->trunked = BFA_FALSE;
3119        port_cfg->maxfrsize = 0;
3120
3121        port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
3122        port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
3123        port_cfg->qos_bw.med = BFA_QOS_BW_MED;
3124        port_cfg->qos_bw.low = BFA_QOS_BW_LOW;
3125
3126        fcport->fec_state = BFA_FEC_OFFLINE;
3127
3128        INIT_LIST_HEAD(&fcport->stats_pending_q);
3129        INIT_LIST_HEAD(&fcport->statsclr_pending_q);
3130
3131        bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
3132}
3133
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
        /* No fcport-private resources to release; intentionally empty. */
}
3138
3139/*
3140 * Called when IOC is ready.
3141 */
3142static void
3143bfa_fcport_start(struct bfa_s *bfa)
3144{
3145        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
3146}
3147
3148/*
3149 * Called before IOC is stopped.
3150 */
3151static void
3152bfa_fcport_stop(struct bfa_s *bfa)
3153{
3154        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
3155        bfa_trunk_iocdisable(bfa);
3156}
3157
3158/*
3159 * Called when IOC failure is detected.
3160 */
3161static void
3162bfa_fcport_iocdisable(struct bfa_s *bfa)
3163{
3164        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3165
3166        bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
3167        bfa_trunk_iocdisable(bfa);
3168}
3169
3170/*
3171 * Update loop info in fcport for SCN online
3172 */
3173static void
3174bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
3175                        struct bfa_fcport_loop_info_s *loop_info)
3176{
3177        fcport->myalpa = loop_info->myalpa;
3178        fcport->alpabm_valid =
3179                        loop_info->alpabm_val;
3180        memcpy(fcport->alpabm.alpa_bm,
3181                        loop_info->alpabm.alpa_bm,
3182                        sizeof(struct fc_alpabm_s));
3183}
3184
/*
 * Capture link attributes from the most recent firmware link-state event.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
        struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
        struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

        fcport->speed = pevent->link_state.speed;
        fcport->topology = pevent->link_state.topology;

        if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
                /* Loop topology carries only loop info; nothing else applies. */
                bfa_fcport_update_loop_info(fcport,
                                &pevent->link_state.attr.loop_info);
                return;
        }

        /* QoS Details */
        fcport->qos_attr = pevent->link_state.qos_attr;
        fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;

        if (fcport->cfg.bb_cr_enabled)
                fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr;

        fcport->fec_state = pevent->link_state.fec_state;

        /*
         * update trunk state if applicable
         */
        if (!fcport->cfg.trunked)
                trunk->attr.state = BFA_TRUNK_DISABLED;

        /* update FCoE specific: VLAN arrives big-endian from firmware */
        fcport->fcoe_vlan =
                be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);

        bfa_trc(fcport->bfa, fcport->speed);
        bfa_trc(fcport->bfa, fcport->topology);
}
3222
3223static void
3224bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
3225{
3226        fcport->speed = BFA_PORT_SPEED_UNKNOWN;
3227        fcport->topology = BFA_PORT_TOPOLOGY_NONE;
3228        fcport->fec_state = BFA_FEC_OFFLINE;
3229}
3230
3231/*
3232 * Send port enable message to firmware.
3233 */
3234static bfa_boolean_t
3235bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3236{
3237        struct bfi_fcport_enable_req_s *m;
3238
3239        /*
3240         * Increment message tag before queue check, so that responses to old
3241         * requests are discarded.
3242         */
3243        fcport->msgtag++;
3244
3245        /*
3246         * check for room in queue to send request now
3247         */
3248        m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3249        if (!m) {
3250                bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3251                                                        &fcport->reqq_wait);
3252                return BFA_FALSE;
3253        }
3254
3255        bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
3256                        bfa_fn_lpu(fcport->bfa));
3257        m->nwwn = fcport->nwwn;
3258        m->pwwn = fcport->pwwn;
3259        m->port_cfg = fcport->cfg;
3260        m->msgtag = fcport->msgtag;
3261        m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
3262         m->use_flash_cfg = fcport->use_flash_cfg;
3263        bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3264        bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3265        bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3266
3267        /*
3268         * queue I/O message to firmware
3269         */
3270        bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3271        return BFA_TRUE;
3272}
3273
3274/*
3275 * Send port disable message to firmware.
3276 */
3277static  bfa_boolean_t
3278bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3279{
3280        struct bfi_fcport_req_s *m;
3281
3282        /*
3283         * Increment message tag before queue check, so that responses to old
3284         * requests are discarded.
3285         */
3286        fcport->msgtag++;
3287
3288        /*
3289         * check for room in queue to send request now
3290         */
3291        m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3292        if (!m) {
3293                bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3294                                                        &fcport->reqq_wait);
3295                return BFA_FALSE;
3296        }
3297
3298        bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
3299                        bfa_fn_lpu(fcport->bfa));
3300        m->msgtag = fcport->msgtag;
3301
3302        /*
3303         * queue I/O message to firmware
3304         */
3305        bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3306
3307        return BFA_TRUE;
3308}
3309
3310static void
3311bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3312{
3313        fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
3314        fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
3315
3316        bfa_trc(fcport->bfa, fcport->pwwn);
3317        bfa_trc(fcport->bfa, fcport->nwwn);
3318}
3319
3320static void
3321bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3322        struct bfa_qos_stats_s *s)
3323{
3324        u32     *dip = (u32 *) d;
3325        __be32  *sip = (__be32 *) s;
3326        int             i;
3327
3328        /* Now swap the 32 bit fields */
3329        for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3330                dip[i] = be32_to_cpu(sip[i]);
3331}
3332
/*
 * Byte-swap FCoE statistics received from firmware.
 *
 * The buffer is processed as pairs of big-endian 32-bit words; on
 * little-endian hosts the two words of each pair are also exchanged,
 * which reassembles each pair as one 64-bit value in host order.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
        struct bfa_fcoe_stats_s *s)
{
        u32     *dip = (u32 *) d;
        __be32  *sip = (__be32 *) s;
        int             i;

        for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
             i = i + 2) {
#ifdef __BIG_ENDIAN
                dip[i] = be32_to_cpu(sip[i]);
                dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
                /* also swap the two words of the 64-bit pair */
                dip[i] = be32_to_cpu(sip[i + 1]);
                dip[i + 1] = be32_to_cpu(sip[i]);
#endif
        }
}
3352
/*
 * Completion callback for a statistics-get request: drain all pending
 * waiters, handing each a byte-swapped copy of the DMA'd statistics.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
        struct bfa_cb_pending_q_s *cb;
        struct list_head *qe, *qen;
        union bfa_fcport_stats_u *ret;

        if (complete) {
                struct timeval tv;
                /* tv is only read below under this same status check */
                if (fcport->stats_status == BFA_STATUS_OK)
                        do_gettimeofday(&tv);

                list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
                        bfa_q_deq(&fcport->stats_pending_q, &qe);
                        cb = (struct bfa_cb_pending_q_s *)qe;
                        if (fcport->stats_status == BFA_STATUS_OK) {
                                ret = (union bfa_fcport_stats_u *)cb->data;
                                /* Swap FC QoS or FCoE stats */
                                if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
                                        bfa_fcport_qos_stats_swap(&ret->fcqos,
                                                        &fcport->stats->fcqos);
                                else {
                                        bfa_fcport_fcoe_stats_swap(&ret->fcoe,
                                                        &fcport->stats->fcoe);
                                        /* seconds since last stats reset */
                                        ret->fcoe.secs_reset =
                                        tv.tv_sec - fcport->stats_reset_time;
                                }
                        }
                        bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
                                        fcport->stats_status);
                }
                fcport->stats_status = BFA_STATUS_OK;
        } else {
                /* Callback cancelled: drop all waiters and reset status. */
                INIT_LIST_HEAD(&fcport->stats_pending_q);
                fcport->stats_status = BFA_STATUS_OK;
        }
}
3391
3392static void
3393bfa_fcport_stats_get_timeout(void *cbarg)
3394{
3395        struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3396
3397        bfa_trc(fcport->bfa, fcport->stats_qfull);
3398
3399        if (fcport->stats_qfull) {
3400                bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3401                fcport->stats_qfull = BFA_FALSE;
3402        }
3403
3404        fcport->stats_status = BFA_STATUS_ETIMER;
3405        __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3406}
3407
3408static void
3409bfa_fcport_send_stats_get(void *cbarg)
3410{
3411        struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3412        struct bfi_fcport_req_s *msg;
3413
3414        msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3415
3416        if (!msg) {
3417                fcport->stats_qfull = BFA_TRUE;
3418                bfa_reqq_winit(&fcport->stats_reqq_wait,
3419                                bfa_fcport_send_stats_get, fcport);
3420                bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3421                                &fcport->stats_reqq_wait);
3422                return;
3423        }
3424        fcport->stats_qfull = BFA_FALSE;
3425
3426        memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3427        bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
3428                        bfa_fn_lpu(fcport->bfa));
3429        bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
3430}
3431
/*
 * Completion callback for a statistics-clear request: restart the reset
 * timestamp and complete all pending clear waiters.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
        struct bfa_cb_pending_q_s *cb;
        struct list_head *qe, *qen;

        if (complete) {
                struct timeval tv;

                /*
                 * re-initialize time stamp for stats reset
                 */
                do_gettimeofday(&tv);
                fcport->stats_reset_time = tv.tv_sec;
                list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
                        bfa_q_deq(&fcport->statsclr_pending_q, &qe);
                        cb = (struct bfa_cb_pending_q_s *)qe;
                        bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
                                                fcport->stats_status);
                }
                fcport->stats_status = BFA_STATUS_OK;
        } else {
                /* Callback cancelled: drop all waiters and reset status. */
                INIT_LIST_HEAD(&fcport->statsclr_pending_q);
                fcport->stats_status = BFA_STATUS_OK;
        }
}
3459
3460static void
3461bfa_fcport_stats_clr_timeout(void *cbarg)
3462{
3463        struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3464
3465        bfa_trc(fcport->bfa, fcport->stats_qfull);
3466
3467        if (fcport->stats_qfull) {
3468                bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3469                fcport->stats_qfull = BFA_FALSE;
3470        }
3471
3472        fcport->stats_status = BFA_STATUS_ETIMER;
3473        __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3474}
3475
3476static void
3477bfa_fcport_send_stats_clear(void *cbarg)
3478{
3479        struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3480        struct bfi_fcport_req_s *msg;
3481
3482        msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3483
3484        if (!msg) {
3485                fcport->stats_qfull = BFA_TRUE;
3486                bfa_reqq_winit(&fcport->stats_reqq_wait,
3487                                bfa_fcport_send_stats_clear, fcport);
3488                bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3489                                                &fcport->stats_reqq_wait);
3490                return;
3491        }
3492        fcport->stats_qfull = BFA_FALSE;
3493
3494        memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3495        bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
3496                        bfa_fn_lpu(fcport->bfa));
3497        bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
3498}
3499
3500/*
3501 * Handle trunk SCN event from firmware.
3502 */
3503static void
3504bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3505{
3506        struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3507        struct bfi_fcport_trunk_link_s *tlink;
3508        struct bfa_trunk_link_attr_s *lattr;
3509        enum bfa_trunk_state state_prev;
3510        int i;
3511        int link_bm = 0;
3512
3513        bfa_trc(fcport->bfa, fcport->cfg.trunked);
3514        WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
3515                   scn->trunk_state != BFA_TRUNK_OFFLINE);
3516
3517        bfa_trc(fcport->bfa, trunk->attr.state);
3518        bfa_trc(fcport->bfa, scn->trunk_state);
3519        bfa_trc(fcport->bfa, scn->trunk_speed);
3520
3521        /*
3522         * Save off new state for trunk attribute query
3523         */
3524        state_prev = trunk->attr.state;
3525        if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3526                trunk->attr.state = scn->trunk_state;
3527        trunk->attr.speed = scn->trunk_speed;
3528        for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3529                lattr = &trunk->attr.link_attr[i];
3530                tlink = &scn->tlink[i];
3531
3532                lattr->link_state = tlink->state;
3533                lattr->trunk_wwn  = tlink->trunk_wwn;
3534                lattr->fctl       = tlink->fctl;
3535                lattr->speed      = tlink->speed;
3536                lattr->deskew     = be32_to_cpu(tlink->deskew);
3537
3538                if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
3539                        fcport->speed    = tlink->speed;
3540                        fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3541                        link_bm |= 1 << i;
3542                }
3543
3544                bfa_trc(fcport->bfa, lattr->link_state);
3545                bfa_trc(fcport->bfa, lattr->trunk_wwn);
3546                bfa_trc(fcport->bfa, lattr->fctl);
3547                bfa_trc(fcport->bfa, lattr->speed);
3548                bfa_trc(fcport->bfa, lattr->deskew);
3549        }
3550
3551        switch (link_bm) {
3552        case 3:
3553                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3554                        BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3555                break;
3556        case 2:
3557                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3558                        BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3559                break;
3560        case 1:
3561                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3562                        BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3563                break;
3564        default:
3565                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3566                        BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3567        }
3568
3569        /*
3570         * Notify upper layers if trunk state changed.
3571         */
3572        if ((state_prev != trunk->attr.state) ||
3573                (scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3574                bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3575                        BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
3576        }
3577}
3578
3579static void
3580bfa_trunk_iocdisable(struct bfa_s *bfa)
3581{
3582        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3583        int i = 0;
3584
3585        /*
3586         * In trunked mode, notify upper layers that link is down
3587         */
3588        if (fcport->cfg.trunked) {
3589                if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3590                        bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3591
3592                fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3593                fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3594                for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3595                        fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3596                        fcport->trunk.attr.link_attr[i].fctl =
3597                                                BFA_TRUNK_LINK_FCTL_NORMAL;
3598                        fcport->trunk.attr.link_attr[i].link_state =
3599                                                BFA_TRUNK_LINK_STATE_DN_LINKDN;
3600                        fcport->trunk.attr.link_attr[i].speed =
3601                                                BFA_PORT_SPEED_UNKNOWN;
3602                        fcport->trunk.attr.link_attr[i].deskew = 0;
3603                }
3604        }
3605}
3606
3607/*
3608 * Called to initialize port attributes
3609 */
3610void
3611bfa_fcport_init(struct bfa_s *bfa)
3612{
3613        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3614
3615        /*
3616         * Initialize port attributes from IOC hardware data.
3617         */
3618        bfa_fcport_set_wwns(fcport);
3619        if (fcport->cfg.maxfrsize == 0)
3620                fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3621        fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3622        fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3623
3624        if (bfa_fcport_is_pbcdisabled(bfa))
3625                bfa->modules.port.pbc_disabled = BFA_TRUE;
3626
3627        WARN_ON(!fcport->cfg.maxfrsize);
3628        WARN_ON(!fcport->cfg.rx_bbcredit);
3629        WARN_ON(!fcport->speed_sup);
3630}
3631
3632/*
3633 * Firmware message handler.
3634 */
3635void
3636bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3637{
3638        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3639        union bfi_fcport_i2h_msg_u i2hmsg;
3640
3641        i2hmsg.msg = msg;
3642        fcport->event_arg.i2hmsg = i2hmsg;
3643
3644        bfa_trc(bfa, msg->mhdr.msg_id);
3645        bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
3646
3647        switch (msg->mhdr.msg_id) {
3648        case BFI_FCPORT_I2H_ENABLE_RSP:
3649                if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
3650
3651                        fcport->stats_dma_ready = BFA_TRUE;
3652                        if (fcport->use_flash_cfg) {
3653                                fcport->cfg = i2hmsg.penable_rsp->port_cfg;
3654                                fcport->cfg.maxfrsize =
3655                                        cpu_to_be16(fcport->cfg.maxfrsize);
3656                                fcport->cfg.path_tov =
3657                                        cpu_to_be16(fcport->cfg.path_tov);
3658                                fcport->cfg.q_depth =
3659                                        cpu_to_be16(fcport->cfg.q_depth);
3660
3661                                if (fcport->cfg.trunked)
3662                                        fcport->trunk.attr.state =
3663                                                BFA_TRUNK_OFFLINE;
3664                                else
3665                                        fcport->trunk.attr.state =
3666                                                BFA_TRUNK_DISABLED;
3667                                fcport->qos_attr.qos_bw =
3668                                        i2hmsg.penable_rsp->port_cfg.qos_bw;
3669                                fcport->use_flash_cfg = BFA_FALSE;
3670                        }
3671
3672                        if (fcport->cfg.qos_enabled)
3673                                fcport->qos_attr.state = BFA_QOS_OFFLINE;
3674                        else
3675                                fcport->qos_attr.state = BFA_QOS_DISABLED;
3676
3677                        fcport->qos_attr.qos_bw_op =
3678                                        i2hmsg.penable_rsp->port_cfg.qos_bw;
3679
3680                        if (fcport->cfg.bb_cr_enabled)
3681                                fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
3682                        else
3683                                fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
3684
3685                        bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3686                }
3687                break;
3688
3689        case BFI_FCPORT_I2H_DISABLE_RSP:
3690                if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3691                        bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3692                break;
3693
3694        case BFI_FCPORT_I2H_EVENT:
3695                if (fcport->cfg.bb_cr_enabled)
3696                        fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
3697                else
3698                        fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
3699
3700                if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3701                        bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3702                else {
3703                        if (i2hmsg.event->link_state.linkstate_rsn ==
3704                            BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
3705                                bfa_sm_send_event(fcport,
3706                                                  BFA_FCPORT_SM_FAA_MISCONFIG);
3707                        else
3708                                bfa_sm_send_event(fcport,
3709                                                  BFA_FCPORT_SM_LINKDOWN);
3710                }
3711                fcport->qos_attr.qos_bw_op =
3712                                i2hmsg.event->link_state.qos_attr.qos_bw_op;
3713                break;
3714
3715        case BFI_FCPORT_I2H_TRUNK_SCN:
3716                bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3717                break;
3718
3719        case BFI_FCPORT_I2H_STATS_GET_RSP:
3720                /*
3721                 * check for timer pop before processing the rsp
3722                 */
3723                if (list_empty(&fcport->stats_pending_q) ||
3724                    (fcport->stats_status == BFA_STATUS_ETIMER))
3725                        break;
3726
3727                bfa_timer_stop(&fcport->timer);
3728                fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3729                __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3730                break;
3731
3732        case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3733                /*
3734                 * check for timer pop before processing the rsp
3735                 */
3736                if (list_empty(&fcport->statsclr_pending_q) ||
3737                    (fcport->stats_status == BFA_STATUS_ETIMER))
3738                        break;
3739
3740                bfa_timer_stop(&fcport->timer);
3741                fcport->stats_status = BFA_STATUS_OK;
3742                __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3743                break;
3744
3745        case BFI_FCPORT_I2H_ENABLE_AEN:
3746                bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3747                break;
3748
3749        case BFI_FCPORT_I2H_DISABLE_AEN:
3750                bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3751                break;
3752
3753        default:
3754                WARN_ON(1);
3755        break;
3756        }
3757}
3758
3759/*
3760 * Registered callback for port events.
3761 */
3762void
3763bfa_fcport_event_register(struct bfa_s *bfa,
3764                                void (*cbfn) (void *cbarg,
3765                                enum bfa_port_linkstate event),
3766                                void *cbarg)
3767{
3768        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3769
3770        fcport->event_cbfn = cbfn;
3771        fcport->event_cbarg = cbarg;
3772}
3773
3774bfa_status_t
3775bfa_fcport_enable(struct bfa_s *bfa)
3776{
3777        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3778
3779        if (bfa_fcport_is_pbcdisabled(bfa))
3780                return BFA_STATUS_PBC;
3781
3782        if (bfa_ioc_is_disabled(&bfa->ioc))
3783                return BFA_STATUS_IOC_DISABLED;
3784
3785        if (fcport->diag_busy)
3786                return BFA_STATUS_DIAG_BUSY;
3787
3788        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3789        return BFA_STATUS_OK;
3790}
3791
3792bfa_status_t
3793bfa_fcport_disable(struct bfa_s *bfa)
3794{
3795        if (bfa_fcport_is_pbcdisabled(bfa))
3796                return BFA_STATUS_PBC;
3797
3798        if (bfa_ioc_is_disabled(&bfa->ioc))
3799                return BFA_STATUS_IOC_DISABLED;
3800
3801        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3802        return BFA_STATUS_OK;
3803}
3804
3805/* If PBC is disabled on port, return error */
3806bfa_status_t
3807bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3808{
3809        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3810        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3811        struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3812
3813        if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3814                bfa_trc(bfa, fcport->pwwn);
3815                return BFA_STATUS_PBC;
3816        }
3817        return BFA_STATUS_OK;
3818}
3819
3820/*
3821 * Configure port speed.
3822 */
3823bfa_status_t
3824bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3825{
3826        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3827
3828        bfa_trc(bfa, speed);
3829
3830        if (fcport->cfg.trunked == BFA_TRUE)
3831                return BFA_STATUS_TRUNK_ENABLED;
3832        if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
3833                        (speed == BFA_PORT_SPEED_16GBPS))
3834                return BFA_STATUS_UNSUPP_SPEED;
3835        if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3836                bfa_trc(bfa, fcport->speed_sup);
3837                return BFA_STATUS_UNSUPP_SPEED;
3838        }
3839
3840        /* Port speed entered needs to be checked */
3841        if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3842                /* For CT2, 1G is not supported */
3843                if ((speed == BFA_PORT_SPEED_1GBPS) &&
3844                    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3845                        return BFA_STATUS_UNSUPP_SPEED;
3846
3847                /* Already checked for Auto Speed and Max Speed supp */
3848                if (!(speed == BFA_PORT_SPEED_1GBPS ||
3849                      speed == BFA_PORT_SPEED_2GBPS ||
3850                      speed == BFA_PORT_SPEED_4GBPS ||
3851                      speed == BFA_PORT_SPEED_8GBPS ||
3852                      speed == BFA_PORT_SPEED_16GBPS ||
3853                      speed == BFA_PORT_SPEED_AUTO))
3854                        return BFA_STATUS_UNSUPP_SPEED;
3855        } else {
3856                if (speed != BFA_PORT_SPEED_10GBPS)
3857                        return BFA_STATUS_UNSUPP_SPEED;
3858        }
3859
3860        fcport->cfg.speed = speed;
3861
3862        return BFA_STATUS_OK;
3863}
3864
3865/*
3866 * Get current speed.
3867 */
3868enum bfa_port_speed
3869bfa_fcport_get_speed(struct bfa_s *bfa)
3870{
3871        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3872
3873        return fcport->speed;
3874}
3875
3876/*
3877 * Configure port topology.
3878 */
3879bfa_status_t
3880bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3881{
3882        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3883
3884        bfa_trc(bfa, topology);
3885        bfa_trc(bfa, fcport->cfg.topology);
3886
3887        switch (topology) {
3888        case BFA_PORT_TOPOLOGY_P2P:
3889                break;
3890
3891        case BFA_PORT_TOPOLOGY_LOOP:
3892                if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
3893                        (fcport->qos_attr.state != BFA_QOS_DISABLED))
3894                        return BFA_STATUS_ERROR_QOS_ENABLED;
3895                if (fcport->cfg.ratelimit != BFA_FALSE)
3896                        return BFA_STATUS_ERROR_TRL_ENABLED;
3897                if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
3898                        (fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
3899                        return BFA_STATUS_ERROR_TRUNK_ENABLED;
3900                if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
3901                        (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
3902                        return BFA_STATUS_UNSUPP_SPEED;
3903                if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
3904                        return BFA_STATUS_LOOP_UNSUPP_MEZZ;
3905                if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
3906                        return BFA_STATUS_DPORT_ERR;
3907                if (bfa_fcport_is_ddport(bfa) != BFA_FALSE)
3908                        return BFA_STATUS_DPORT_ERR;
3909                break;
3910
3911        case BFA_PORT_TOPOLOGY_AUTO:
3912                break;
3913
3914        default:
3915                return BFA_STATUS_EINVAL;
3916        }
3917
3918        fcport->cfg.topology = topology;
3919        return BFA_STATUS_OK;
3920}
3921
3922/*
3923 * Get current topology.
3924 */
3925enum bfa_port_topology
3926bfa_fcport_get_topology(struct bfa_s *bfa)
3927{
3928        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3929
3930        return fcport->topology;
3931}
3932
3933/**
3934 * Get config topology.
3935 */
3936enum bfa_port_topology
3937bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
3938{
3939        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3940
3941        return fcport->cfg.topology;
3942}
3943
3944bfa_status_t
3945bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3946{
3947        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3948
3949        bfa_trc(bfa, alpa);
3950        bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3951        bfa_trc(bfa, fcport->cfg.hardalpa);
3952
3953        fcport->cfg.cfg_hardalpa = BFA_TRUE;
3954        fcport->cfg.hardalpa = alpa;
3955
3956        return BFA_STATUS_OK;
3957}
3958
3959bfa_status_t
3960bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3961{
3962        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3963
3964        bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3965        bfa_trc(bfa, fcport->cfg.hardalpa);
3966
3967        fcport->cfg.cfg_hardalpa = BFA_FALSE;
3968        return BFA_STATUS_OK;
3969}
3970
3971bfa_boolean_t
3972bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3973{
3974        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3975
3976        *alpa = fcport->cfg.hardalpa;
3977        return fcport->cfg.cfg_hardalpa;
3978}
3979
3980u8
3981bfa_fcport_get_myalpa(struct bfa_s *bfa)
3982{
3983        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3984
3985        return fcport->myalpa;
3986}
3987
3988bfa_status_t
3989bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3990{
3991        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3992
3993        bfa_trc(bfa, maxfrsize);
3994        bfa_trc(bfa, fcport->cfg.maxfrsize);
3995
3996        /* with in range */
3997        if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3998                return BFA_STATUS_INVLD_DFSZ;
3999
4000        /* power of 2, if not the max frame size of 2112 */
4001        if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
4002                return BFA_STATUS_INVLD_DFSZ;
4003
4004        fcport->cfg.maxfrsize = maxfrsize;
4005        return BFA_STATUS_OK;
4006}
4007
4008u16
4009bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
4010{
4011        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4012
4013        return fcport->cfg.maxfrsize;
4014}
4015
4016u8
4017bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
4018{
4019        if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
4020                return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
4021
4022        else
4023                return 0;
4024}
4025
4026void
4027bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
4028{
4029        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4030
4031        fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
4032}
4033
4034/*
4035 * Get port attributes.
4036 */
4037
4038wwn_t
4039bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
4040{
4041        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4042        if (node)
4043                return fcport->nwwn;
4044        else
4045                return fcport->pwwn;
4046}
4047
/*
 * Fill in a snapshot of the port's attributes: WWNs, speed, topology,
 * beaconing, and the effective port state.
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	/* Factory (manufacturing) WWNs come from the IOC attributes. */
	attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;

	/* Bulk-copy the config, then override individual fields below. */
	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;

	/* path TOV and queue depth live in the FCP-IM module. */
	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);

	attr->fec_state = fcport->fec_state;

	/* PBC Disabled State overrides the SM-derived port state. */
	if (bfa_fcport_is_pbcdisabled(bfa))
		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
	else {
		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_IOCDIS;
		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_FWMISMATCH;
	}

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
4097
4098#define BFA_FCPORT_STATS_TOV    1000
4099
4100/*
4101 * Fetch port statistics (FCQoS or FCoE).
4102 */
4103bfa_status_t
4104bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
4105{
4106        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4107
4108        if (!bfa_iocfc_is_operational(bfa) ||
4109            !fcport->stats_dma_ready)
4110                return BFA_STATUS_IOC_NON_OP;
4111
4112        if (!list_empty(&fcport->statsclr_pending_q))
4113                return BFA_STATUS_DEVBUSY;
4114
4115        if (list_empty(&fcport->stats_pending_q)) {
4116                list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4117                bfa_fcport_send_stats_get(fcport);
4118                bfa_timer_start(bfa, &fcport->timer,
4119                                bfa_fcport_stats_get_timeout,
4120                                fcport, BFA_FCPORT_STATS_TOV);
4121        } else
4122                list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4123
4124        return BFA_STATUS_OK;
4125}
4126
4127/*
4128 * Reset port statistics (FCQoS or FCoE).
4129 */
4130bfa_status_t
4131bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
4132{
4133        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4134
4135        if (!bfa_iocfc_is_operational(bfa) ||
4136            !fcport->stats_dma_ready)
4137                return BFA_STATUS_IOC_NON_OP;
4138
4139        if (!list_empty(&fcport->stats_pending_q))
4140                return BFA_STATUS_DEVBUSY;
4141
4142        if (list_empty(&fcport->statsclr_pending_q)) {
4143                list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4144                bfa_fcport_send_stats_clear(fcport);
4145                bfa_timer_start(bfa, &fcport->timer,
4146                                bfa_fcport_stats_clr_timeout,
4147                                fcport, BFA_FCPORT_STATS_TOV);
4148        } else
4149                list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4150
4151        return BFA_STATUS_OK;
4152}
4153
4154/*
4155 * Fetch port attributes.
4156 */
4157bfa_boolean_t
4158bfa_fcport_is_disabled(struct bfa_s *bfa)
4159{
4160        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4161
4162        return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4163                BFA_PORT_ST_DISABLED;
4164
4165}
4166
4167bfa_boolean_t
4168bfa_fcport_is_dport(struct bfa_s *bfa)
4169{
4170        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4171
4172        return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4173                BFA_PORT_ST_DPORT);
4174}
4175
4176bfa_boolean_t
4177bfa_fcport_is_ddport(struct bfa_s *bfa)
4178{
4179        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4180
4181        return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4182                BFA_PORT_ST_DDPORT);
4183}
4184
4185bfa_status_t
4186bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
4187{
4188        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4189        enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
4190
4191        bfa_trc(bfa, ioc_type);
4192
4193        if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
4194                return BFA_STATUS_QOS_BW_INVALID;
4195
4196        if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
4197                return BFA_STATUS_QOS_BW_INVALID;
4198
4199        if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
4200            (qos_bw->low > qos_bw->high))
4201                return BFA_STATUS_QOS_BW_INVALID;
4202
4203        if ((ioc_type == BFA_IOC_TYPE_FC) &&
4204            (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
4205                fcport->cfg.qos_bw = *qos_bw;
4206
4207        return BFA_STATUS_OK;
4208}
4209
4210bfa_boolean_t
4211bfa_fcport_is_ratelim(struct bfa_s *bfa)
4212{
4213        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4214
4215        return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
4216
4217}
4218
4219/*
4220 *      Enable/Disable FAA feature in port config
4221 */
4222void
4223bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
4224{
4225        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4226
4227        bfa_trc(bfa, state);
4228        fcport->cfg.faa_state = state;
4229}
4230
4231/*
4232 * Get default minimum ratelim speed
4233 */
4234enum bfa_port_speed
4235bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
4236{
4237        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4238
4239        bfa_trc(bfa, fcport->cfg.trl_def_speed);
4240        return fcport->cfg.trl_def_speed;
4241
4242}
4243
4244void
4245bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4246                  bfa_boolean_t link_e2e_beacon)
4247{
4248        struct bfa_s *bfa = dev;
4249        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4250
4251        bfa_trc(bfa, beacon);
4252        bfa_trc(bfa, link_e2e_beacon);
4253        bfa_trc(bfa, fcport->beacon);
4254        bfa_trc(bfa, fcport->link_e2e_beacon);
4255
4256        fcport->beacon = beacon;
4257        fcport->link_e2e_beacon = link_e2e_beacon;
4258}
4259
4260bfa_boolean_t
4261bfa_fcport_is_linkup(struct bfa_s *bfa)
4262{
4263        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4264
4265        return  (!fcport->cfg.trunked &&
4266                 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4267                (fcport->cfg.trunked &&
4268                 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
4269}
4270
4271bfa_boolean_t
4272bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4273{
4274        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4275
4276        return fcport->cfg.qos_enabled;
4277}
4278
4279bfa_boolean_t
4280bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
4281{
4282        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4283
4284        return fcport->cfg.trunked;
4285}
4286
/*
 * Enable/disable BB credit recovery (BB-CR) in the port configuration.
 * The ordering of the checks below is significant: it determines which
 * status code a caller sees when multiple preconditions fail.
 */
bfa_status_t
bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, on_off);

	/* BB-CR exists only on native FC ports. */
	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
		return BFA_STATUS_BBCR_FC_ONLY;

	/* Mezzanine cards (other than Chinook) do not support it. */
	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) &&
		(bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK))
		return BFA_STATUS_CMD_NOTSUPP_MEZZ;

	if (on_off) {
		/* BB-CR is incompatible with loop, QoS and trunking. */
		if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
			return BFA_STATUS_TOPOLOGY_LOOP;

		if (fcport->cfg.qos_enabled)
			return BFA_STATUS_ERROR_QOS_ENABLED;

		if (fcport->cfg.trunked)
			return BFA_STATUS_TRUNK_ENABLED;

		/* Fixed speeds below the adapter maximum are rejected. */
		if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) &&
			(fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc)))
			return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT;

		/* Requires an adapter capable of at least 8G. */
		if (bfa_ioc_speed_sup(&bfa->ioc) < BFA_PORT_SPEED_8GBPS)
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;

		/* Already enabled: report whether anything would change. */
		if (fcport->cfg.bb_cr_enabled) {
			if (bb_scn != fcport->cfg.bb_scn)
				return BFA_STATUS_BBCR_CFG_NO_CHANGE;
			else
				return BFA_STATUS_NO_CHANGE;
		}

		/* Out-of-range bb_scn falls back to the default. */
		if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX))
			bb_scn = BFA_BB_SCN_DEF;

		fcport->cfg.bb_cr_enabled = on_off;
		fcport->cfg.bb_scn = bb_scn;
	} else {
		if (!fcport->cfg.bb_cr_enabled)
			return BFA_STATUS_NO_CHANGE;

		fcport->cfg.bb_cr_enabled = on_off;
		fcport->cfg.bb_scn = 0;
	}

	return BFA_STATUS_OK;
}
4340
4341bfa_status_t
4342bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
4343                struct bfa_bbcr_attr_s *bbcr_attr)
4344{
4345        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4346
4347        if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
4348                return BFA_STATUS_BBCR_FC_ONLY;
4349
4350        if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
4351                return BFA_STATUS_TOPOLOGY_LOOP;
4352
4353        *bbcr_attr = fcport->bbcr_attr;
4354
4355        return BFA_STATUS_OK;
4356}
4357
4358void
4359bfa_fcport_dportenable(struct bfa_s *bfa)
4360{
4361        /*
4362         * Assume caller check for port is in disable state
4363         */
4364        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
4365        bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
4366}
4367
4368void
4369bfa_fcport_dportdisable(struct bfa_s *bfa)
4370{
4371        /*
4372         * Assume caller check for port is in disable state
4373         */
4374        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
4375        bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
4376}
4377
4378void
4379bfa_fcport_ddportenable(struct bfa_s *bfa)
4380{
4381        /*
4382         * Assume caller check for port is in disable state
4383         */
4384        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
4385}
4386
4387void
4388bfa_fcport_ddportdisable(struct bfa_s *bfa)
4389{
4390        /*
4391         * Assume caller check for port is in disable state
4392         */
4393        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE);
4394}
4395
4396/*
4397 * Rport State machine functions
4398 */
4399/*
4400 * Beginning state, only online event expected.
4401 */
4402static void
4403bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
4404{
4405        bfa_trc(rp->bfa, rp->rport_tag);
4406        bfa_trc(rp->bfa, event);
4407
4408        switch (event) {
4409        case BFA_RPORT_SM_CREATE:
4410                bfa_stats(rp, sm_un_cr);
4411                bfa_sm_set_state(rp, bfa_rport_sm_created);
4412                break;
4413
4414        default:
4415                bfa_stats(rp, sm_un_unexp);
4416                bfa_sm_fault(rp->bfa, event);
4417        }
4418}
4419
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_ONLINE:
                bfa_stats(rp, sm_cr_on);
                /*
                 * Ask firmware to create the rport; if the request queue
                 * is full, park in the qfull state until queue resume.
                 */
                if (bfa_rport_send_fwcreate(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
                break;

        case BFA_RPORT_SM_DELETE:
                bfa_stats(rp, sm_cr_del);
                /* Never reached firmware -- free the rport locally. */
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_cr_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                break;

        default:
                bfa_stats(rp, sm_cr_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4451
4452/*
4453 * Waiting for rport create response from firmware.
4454 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_FWRSP:
                /* Firmware acknowledged the create; notify the driver. */
                bfa_stats(rp, sm_fwc_rsp);
                bfa_sm_set_state(rp, bfa_rport_sm_online);
                bfa_rport_online_cb(rp);
                break;

        case BFA_RPORT_SM_DELETE:
                /* Remember the delete; act on it once firmware responds. */
                bfa_stats(rp, sm_fwc_del);
                bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
                break;

        case BFA_RPORT_SM_OFFLINE:
                /* Remember the offline; act on it once firmware responds. */
                bfa_stats(rp, sm_fwc_off);
                bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_fwc_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                break;

        default:
                bfa_stats(rp, sm_fwc_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4488
4489/*
4490 * Request queue is full, awaiting queue resume to send create request.
4491 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_QRESUME:
                /* Queue space is back -- retry the firmware create. */
                bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
                bfa_rport_send_fwcreate(rp);
                break;

        case BFA_RPORT_SM_DELETE:
                /* Create was never sent; cancel the wait and free. */
                bfa_stats(rp, sm_fwc_del);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_reqq_wcancel(&rp->reqq_wait);
                bfa_rport_free(rp);
                break;

        case BFA_RPORT_SM_OFFLINE:
                /* Nothing in firmware yet; go straight to offline. */
                bfa_stats(rp, sm_fwc_off);
                bfa_sm_set_state(rp, bfa_rport_sm_offline);
                bfa_reqq_wcancel(&rp->reqq_wait);
                bfa_rport_offline_cb(rp);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_fwc_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                bfa_reqq_wcancel(&rp->reqq_wait);
                break;

        default:
                bfa_stats(rp, sm_fwc_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4529
4530/*
4531 * Online state - normal parking state.
4532 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        struct bfi_rport_qos_scn_s *qos_scn;

        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_OFFLINE:
                bfa_stats(rp, sm_on_off);
                /* Tear down the firmware rport; qfull state if queue full. */
                if (bfa_rport_send_fwdelete(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
                break;

        case BFA_RPORT_SM_DELETE:
                bfa_stats(rp, sm_on_del);
                /* Same firmware delete, but free the rport afterwards. */
                if (bfa_rport_send_fwdelete(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_deleting);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_on_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                break;

        case BFA_RPORT_SM_SET_SPEED:
                /* Best effort -- send_fwspeed() drops the request if the
                 * request queue is full (no wait entry is queued).
                 */
                bfa_rport_send_fwspeed(rp);
                break;

        case BFA_RPORT_SM_QOS_SCN:
                /* QoS state change notification from firmware. */
                qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
                rp->qos_attr = qos_scn->new_qos_attr;
                bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
                bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
                bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
                bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

                /*
                 * Flow ids arrive big-endian from firmware; convert in
                 * place before comparing.  Priority fields are compared
                 * as-is (no byte swap applied).
                 */
                qos_scn->old_qos_attr.qos_flow_id  =
                        be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
                qos_scn->new_qos_attr.qos_flow_id  =
                        be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

                if (qos_scn->old_qos_attr.qos_flow_id !=
                        qos_scn->new_qos_attr.qos_flow_id)
                        bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
                                                    qos_scn->old_qos_attr,
                                                    qos_scn->new_qos_attr);
                if (qos_scn->old_qos_attr.qos_priority !=
                        qos_scn->new_qos_attr.qos_priority)
                        bfa_cb_rport_qos_scn_prio(rp->rport_drv,
                                                  qos_scn->old_qos_attr,
                                                  qos_scn->new_qos_attr);
                break;

        default:
                bfa_stats(rp, sm_on_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4597
4598/*
4599 * Firmware rport is being deleted - awaiting f/w response.
4600 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_FWRSP:
                /* Delete confirmed by firmware; notify driver offline. */
                bfa_stats(rp, sm_fwd_rsp);
                bfa_sm_set_state(rp, bfa_rport_sm_offline);
                bfa_rport_offline_cb(rp);
                break;

        case BFA_RPORT_SM_DELETE:
                /* Upgrade the pending offline to a full delete. */
                bfa_stats(rp, sm_fwd_del);
                bfa_sm_set_state(rp, bfa_rport_sm_deleting);
                break;

        case BFA_RPORT_SM_HWFAIL:
                /* IOC is gone; still deliver the offline callback. */
                bfa_stats(rp, sm_fwd_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                bfa_rport_offline_cb(rp);
                break;

        default:
                bfa_stats(rp, sm_fwd_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4630
/*
 * Request queue is full, awaiting queue resume to send delete request.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_QRESUME:
                /* Queue space is back -- retry the firmware delete. */
                bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
                bfa_rport_send_fwdelete(rp);
                break;

        case BFA_RPORT_SM_DELETE:
                /* Offline-in-progress becomes a delete-in-progress. */
                bfa_stats(rp, sm_fwd_del);
                bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_fwd_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                bfa_reqq_wcancel(&rp->reqq_wait);
                bfa_rport_offline_cb(rp);
                break;

        default:
                bfa_stats(rp, sm_fwd_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4660
4661/*
4662 * Offline state.
4663 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_DELETE:
                bfa_stats(rp, sm_off_del);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        case BFA_RPORT_SM_ONLINE:
                bfa_stats(rp, sm_off_on);
                /* Re-create the rport in firmware to bring it online. */
                if (bfa_rport_send_fwcreate(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_off_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                break;

        case BFA_RPORT_SM_OFFLINE:
                /* Already offline: just replay the offline callback. */
                bfa_rport_offline_cb(rp);
                break;

        default:
                bfa_stats(rp, sm_off_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4699
4700/*
4701 * Rport is deleted, waiting for firmware response to delete.
4702 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_FWRSP:
                /* Firmware delete completed; release the rport. */
                bfa_stats(rp, sm_del_fwrsp);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_del_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        default:
                /* NOTE(review): no unexpected-event counter is bumped here,
                 * unlike the other handlers -- confirm this is intentional.
                 */
                bfa_sm_fault(rp->bfa, event);
        }
}
4726
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_QRESUME:
                /* Queue space is back -- send the deferred delete.  The
                 * sm_del_fwrsp counter is shared with the FWRSP path of
                 * bfa_rport_sm_deleting().
                 */
                bfa_stats(rp, sm_del_fwrsp);
                bfa_sm_set_state(rp, bfa_rport_sm_deleting);
                bfa_rport_send_fwdelete(rp);
                break;

        case BFA_RPORT_SM_HWFAIL:
                /* IOC failed; cancel the queue wait and free locally. */
                bfa_stats(rp, sm_del_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_reqq_wcancel(&rp->reqq_wait);
                bfa_rport_free(rp);
                break;

        default:
                bfa_sm_fault(rp->bfa, event);
        }
}
4751
4752/*
4753 * Waiting for rport create response from firmware. A delete is pending.
4754 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
                                enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_FWRSP:
                /* Create completed; now issue the deferred delete. */
                bfa_stats(rp, sm_delp_fwrsp);
                if (bfa_rport_send_fwdelete(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_deleting);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
                break;

        case BFA_RPORT_SM_HWFAIL:
                /* IOC failed; no firmware cleanup needed, free locally. */
                bfa_stats(rp, sm_delp_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        default:
                bfa_stats(rp, sm_delp_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4782
4783/*
4784 * Waiting for rport create response from firmware. Rport offline is pending.
4785 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
                                 enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_FWRSP:
                /* Create completed; now tear it down for the offline. */
                bfa_stats(rp, sm_offp_fwrsp);
                if (bfa_rport_send_fwdelete(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
                break;

        case BFA_RPORT_SM_DELETE:
                /* Pending offline is upgraded to a pending delete. */
                bfa_stats(rp, sm_offp_del);
                bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_offp_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                bfa_rport_offline_cb(rp);
                break;

        default:
                bfa_stats(rp, sm_offp_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4818
4819/*
4820 * IOC h/w failed.
4821 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_OFFLINE:
                /* No firmware to talk to; complete the offline locally. */
                bfa_stats(rp, sm_iocd_off);
                bfa_rport_offline_cb(rp);
                break;

        case BFA_RPORT_SM_DELETE:
                bfa_stats(rp, sm_iocd_del);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        case BFA_RPORT_SM_ONLINE:
                /* IOC recovered; re-create the rport in firmware. */
                bfa_stats(rp, sm_iocd_on);
                if (bfa_rport_send_fwcreate(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
                break;

        case BFA_RPORT_SM_HWFAIL:
                /* Already in the h/w failed state -- ignore. */
                break;

        default:
                bfa_stats(rp, sm_iocd_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4856
4857
4858
4859/*
4860 *  bfa_rport_private BFA rport private functions
4861 */
4862
4863static void
4864__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4865{
4866        struct bfa_rport_s *rp = cbarg;
4867
4868        if (complete)
4869                bfa_cb_rport_online(rp->rport_drv);
4870}
4871
4872static void
4873__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4874{
4875        struct bfa_rport_s *rp = cbarg;
4876
4877        if (complete)
4878                bfa_cb_rport_offline(rp->rport_drv);
4879}
4880
4881static void
4882bfa_rport_qresume(void *cbarg)
4883{
4884        struct bfa_rport_s      *rp = cbarg;
4885
4886        bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4887}
4888
/*
 * Report the KVA memory the rport module needs for this configuration.
 */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
                struct bfa_s *bfa)
{
        struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);

        /* Enforce the minimum rport count before sizing memory. */
        if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
                cfg->fwcfg.num_rports = BFA_RPORT_MIN;

        /* kva memory */
        bfa_mem_kva_setup(minfo, rport_kva,
                cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
}
4902
/*
 * Carve the rport array out of pre-claimed KVA memory and put every
 * rport (except tag 0) on the free list.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_pcidev_s *pcidev)
{
        struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
        struct bfa_rport_s *rp;
        u16 i;

        INIT_LIST_HEAD(&mod->rp_free_q);
        INIT_LIST_HEAD(&mod->rp_active_q);
        INIT_LIST_HEAD(&mod->rp_unused_q);

        rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
        mod->rps_list = rp;
        mod->num_rports = cfg->fwcfg.num_rports;

        /* num_rports must be a non-zero power of two. */
        WARN_ON(!mod->num_rports ||
                   (mod->num_rports & (mod->num_rports - 1)));

        for (i = 0; i < mod->num_rports; i++, rp++) {
                memset(rp, 0, sizeof(struct bfa_rport_s));
                rp->bfa = bfa;
                rp->rport_tag = i;
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);

                /*
                 * rport tag 0 is reserved (never put on the free list,
                 * so it is never handed out by bfa_rport_alloc()).
                 */
                if (i)
                        list_add_tail(&rp->qe, &mod->rp_free_q);

                bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
        }

        /*
         * consume memory
         */
        bfa_mem_kva_curp(mod) = (u8 *) rp;
}
4942
static void
bfa_rport_detach(struct bfa_s *bfa)
{
        /* Intentionally empty: the rport module has no teardown work. */
}
4947
static void
bfa_rport_start(struct bfa_s *bfa)
{
        /* Intentionally empty: nothing to do at module start. */
}
4952
static void
bfa_rport_stop(struct bfa_s *bfa)
{
        /* Intentionally empty: nothing to do at module stop. */
}
4957
/*
 * IOC is going down: reclaim unused rports and fail every active one.
 */
static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
        struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
        struct bfa_rport_s *rport;
        struct list_head *qe, *qen;

        /* Enqueue unused rport resources to free_q */
        list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);

        /* Safe iteration: HWFAIL may move the rport off the active list. */
        list_for_each_safe(qe, qen, &mod->rp_active_q) {
                rport = (struct bfa_rport_s *) qe;
                bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
        }
}
4973
4974static struct bfa_rport_s *
4975bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4976{
4977        struct bfa_rport_s *rport;
4978
4979        bfa_q_deq(&mod->rp_free_q, &rport);
4980        if (rport)
4981                list_add_tail(&rport->qe, &mod->rp_active_q);
4982
4983        return rport;
4984}
4985
4986static void
4987bfa_rport_free(struct bfa_rport_s *rport)
4988{
4989        struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4990
4991        WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
4992        list_del(&rport->qe);
4993        list_add_tail(&rport->qe, &mod->rp_free_q);
4994}
4995
/*
 * Build and post an rport-create request to firmware.  Returns BFA_FALSE
 * (after queueing a wait entry, so bfa_rport_qresume() fires later) when
 * the request queue is full.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
        struct bfi_rport_create_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
        if (!m) {
                bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
                return BFA_FALSE;
        }

        /* Populate the H2I create request from the cached rport_info. */
        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
                        bfa_fn_lpu(rp->bfa));
        m->bfa_handle = rp->rport_tag;
        /* max_frmsz is the only multi-byte field byte-swapped here. */
        m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
        m->pid = rp->rport_info.pid;
        m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
        m->local_pid = rp->rport_info.local_pid;
        m->fc_class = rp->rport_info.fc_class;
        m->vf_en = rp->rport_info.vf_en;
        m->vf_id = rp->rport_info.vf_id;
        m->cisc = rp->rport_info.cisc;

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
        return BFA_TRUE;
}
5028
/*
 * Post an rport-delete request to firmware.  Returns BFA_FALSE (after
 * queueing a wait entry) when the request queue is full.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
        struct bfi_rport_delete_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
        if (!m) {
                bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
                return BFA_FALSE;
        }

        /* fw_handle was returned by firmware in the create response. */
        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
                        bfa_fn_lpu(rp->bfa));
        m->fw_handle = rp->fw_handle;

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
        return BFA_TRUE;
}
5053
/*
 * Post a set-speed request to firmware.  Unlike fwcreate/fwdelete, this
 * does NOT queue a wait entry when the request queue is full -- the
 * request is simply traced and dropped (best effort).
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
        struct bfa_rport_speed_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
        if (!m) {
                bfa_trc(rp->bfa, rp->rport_info.speed);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
                        bfa_fn_lpu(rp->bfa));
        m->fw_handle = rp->fw_handle;
        m->speed = (u8)rp->rport_info.speed;

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
        return BFA_TRUE;
}
5079
5080
5081
5082/*
5083 *  bfa_rport_public
5084 */
5085
5086/*
5087 * Rport interrupt processing.
5088 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        union bfi_rport_i2h_msg_u msg;
        struct bfa_rport_s *rp;

        bfa_trc(bfa, m->mhdr.msg_id);

        msg.msg = m;

        switch (m->mhdr.msg_id) {
        case BFI_RPORT_I2H_CREATE_RSP:
                /* Capture the firmware handle and QoS attrs, then move
                 * the state machine forward with FWRSP.
                 */
                rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
                rp->fw_handle = msg.create_rsp->fw_handle;
                rp->qos_attr = msg.create_rsp->qos_attr;
                bfa_rport_set_lunmask(bfa, rp);
                WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
                bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
                break;

        case BFI_RPORT_I2H_DELETE_RSP:
                rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
                WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
                bfa_rport_unset_lunmask(bfa, rp);
                bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
                break;

        case BFI_RPORT_I2H_QOS_SCN:
                /* Stash the raw firmware message; the online-state handler
                 * decodes it when it processes QOS_SCN.
                 */
                rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
                rp->event_arg.fw_msg = msg.qos_scn_evt;
                bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
                break;

        case BFI_RPORT_I2H_LIP_SCN_ONLINE:
                /* Loop state change: refresh loop info, then notify. */
                bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
                                &msg.lip_scn->loop_info);
                bfa_cb_rport_scn_online(bfa);
                break;

        case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
                bfa_cb_rport_scn_offline(bfa);
                break;

        case BFI_RPORT_I2H_NO_DEV:
                rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
                bfa_cb_rport_scn_no_dev(rp->rport_drv);
                break;

        default:
                /* Unknown message id from firmware -- trace and warn. */
                bfa_trc(bfa, m->mhdr.msg_id);
                WARN_ON(1);
        }
}
5142
5143void
5144bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
5145{
5146        struct bfa_rport_mod_s  *mod = BFA_RPORT_MOD(bfa);
5147        struct list_head        *qe;
5148        int     i;
5149
5150        for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
5151                bfa_q_deq_tail(&mod->rp_free_q, &qe);
5152                list_add_tail(qe, &mod->rp_unused_q);
5153        }
5154}
5155
5156/*
5157 *  bfa_rport_api
5158 */
5159
5160struct bfa_rport_s *
5161bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
5162{
5163        struct bfa_rport_s *rp;
5164
5165        rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
5166
5167        if (rp == NULL)
5168                return NULL;
5169
5170        rp->bfa = bfa;
5171        rp->rport_drv = rport_drv;
5172        memset(&rp->stats, 0, sizeof(rp->stats));
5173
5174        WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
5175        bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
5176
5177        return rp;
5178}
5179
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
        /* Warn-but-recover: a zero max_frmsz is a peer bug (see below),
         * so flag it loudly yet still continue with a safe default.
         */
        WARN_ON(rport_info->max_frmsz == 0);

        /*
         * Some JBODs are seen to be not setting PDU size correctly in PLOGI
         * responses. Default to minimum size.
         */
        if (rport_info->max_frmsz == 0) {
                bfa_trc(rport->bfa, rport->rport_tag);
                rport_info->max_frmsz = FC_MIN_PDUSZ;
        }

        /* Cache the (possibly fixed-up) info, then bring the rport up. */
        rport->rport_info = *rport_info;
        bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
5197
5198void
5199bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
5200{
5201        WARN_ON(speed == 0);
5202        WARN_ON(speed == BFA_PORT_SPEED_AUTO);
5203
5204        if (rport) {
5205                rport->rport_info.speed = speed;
5206                bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
5207        }
5208}
5209
5210/* Set Rport LUN Mask */
void
bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
        struct bfa_lps_mod_s    *lps_mod = BFA_LPS_MOD(bfa);
        wwn_t   lp_wwn, rp_wwn;
        u8 lp_tag = (u8)rp->rport_info.lp_tag;

        /* Resolve both endpoint WWNs: remote port and logical port. */
        rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
        lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

        /* Mark LUN masking enabled on both the lps and the rport. */
        BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
                                        rp->lun_mask = BFA_TRUE;
        bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
}
5225
5226/* Unset Rport LUN mask */
void
bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
        struct bfa_lps_mod_s    *lps_mod = BFA_LPS_MOD(bfa);
        wwn_t   lp_wwn, rp_wwn;

        /* Resolve both endpoint WWNs: remote port and logical port. */
        rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
        lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

        /* Clear the mask flags and invalidate the fcpim tag mapping. */
        BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
                                rp->lun_mask = BFA_FALSE;
        bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
                        BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
}
5241
5242/*
5243 * SGPG related functions
5244 */
5245
5246/*
5247 * Compute and return memory needed by FCP(im) module.
5248 */
static void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
                struct bfa_s *bfa)
{
        struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
        struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
        struct bfa_mem_dma_s *seg_ptr;
        u16     nsegs, idx, per_seg_sgpg, num_sgpg;
        u32     sgpg_sz = sizeof(struct bfi_sgpg_s);

        /* Clamp the configured SG page count into [MIN, MAX]. */
        if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
                cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
        else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
                cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;

        num_sgpg = cfg->drvcfg.num_sgpgs;

        /* Split the DMA requirement across segments; the final segment
         * carries the remainder.
         */
        nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
        per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);

        bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
                if (num_sgpg >= per_seg_sgpg) {
                        num_sgpg -= per_seg_sgpg;
                        bfa_mem_dma_setup(minfo, seg_ptr,
                                        per_seg_sgpg * sgpg_sz);
                } else
                        bfa_mem_dma_setup(minfo, seg_ptr,
                                        num_sgpg * sgpg_sz);
        }

        /* kva memory */
        bfa_mem_kva_setup(minfo, sgpg_kva,
                cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
}
5283
/*
 * Pair each host-side bfa_sgpg_s (KVA) with an aligned firmware-visible
 * bfi_sgpg_s (DMA) and queue them on the free list.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_pcidev_s *pcidev)
{
        struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
        struct bfa_sgpg_s *hsgpg;
        struct bfi_sgpg_s *sgpg;
        u64 align_len;
        struct bfa_mem_dma_s *seg_ptr;
        u32     sgpg_sz = sizeof(struct bfi_sgpg_s);
        u16     i, idx, nsegs, per_seg_sgpg, num_sgpg;

        /* Union lets one physical address be viewed either as a plain
         * u64 or in the firmware's bfi_addr_u layout.
         */
        union {
                u64 pa;
                union bfi_addr_u addr;
        } sgpg_pa, sgpg_pa_tmp;

        INIT_LIST_HEAD(&mod->sgpg_q);
        INIT_LIST_HEAD(&mod->sgpg_wait_q);

        bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

        mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;

        num_sgpg = cfg->drvcfg.num_sgpgs;
        nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);

        /* dma/kva mem claim */
        hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);

        bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {

                if (!bfa_mem_dma_virt(seg_ptr))
                        break;

                /* Round the segment base up to SGPG alignment; both the
                 * virtual and physical pointers get the same offset.
                 */
                align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
                                             bfa_mem_dma_phys(seg_ptr);

                sgpg = (struct bfi_sgpg_s *)
                        (((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
                sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
                WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));

                /* Alignment padding may shrink the usable page count. */
                per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;

                for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
                        memset(hsgpg, 0, sizeof(*hsgpg));
                        memset(sgpg, 0, sizeof(*sgpg));

                        /* Link host page to its DMA page (address stored
                         * in the firmware's expected endianness).
                         */
                        hsgpg->sgpg = sgpg;
                        sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
                        hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
                        list_add_tail(&hsgpg->qe, &mod->sgpg_q);

                        sgpg++;
                        hsgpg++;
                        sgpg_pa.pa += sgpg_sz;
                }
        }

        bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
}
5346
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
        /* Intentionally empty: the SGPG module has no teardown work. */
}
5351
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
        /* Intentionally empty: nothing to do at module start. */
}
5356
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
        /* Intentionally empty: nothing to do at module stop. */
}
5361
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
        /* Intentionally empty: SG pages need no cleanup on IOC disable. */
}
5366
5367bfa_status_t
5368bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
5369{
5370        struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5371        struct bfa_sgpg_s *hsgpg;
5372        int i;
5373
5374        if (mod->free_sgpgs < nsgpgs)
5375                return BFA_STATUS_ENOMEM;
5376
5377        for (i = 0; i < nsgpgs; i++) {
5378                bfa_q_deq(&mod->sgpg_q, &hsgpg);
5379                WARN_ON(!hsgpg);
5380                list_add_tail(&hsgpg->qe, sgpg_q);
5381        }
5382
5383        mod->free_sgpgs -= nsgpgs;
5384        return BFA_STATUS_OK;
5385}
5386
/*
 * Return 'nsgpg' scatter-gather pages (on sgpg_q) to the module free pool,
 * then hand freed pages to queued waiters in FIFO order. A waiter whose
 * request becomes fully satisfied is dequeued and its callback is invoked.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
        struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
        struct bfa_sgpg_wqe_s *wqe;

        mod->free_sgpgs += nsgpg;
        WARN_ON(mod->free_sgpgs > mod->num_sgpgs);

        /* return the pages to the free pool */
        list_splice_tail_init(sgpg_q, &mod->sgpg_q);

        if (list_empty(&mod->sgpg_wait_q))
                return;

        /*
         * satisfy as many waiting requests as possible
         */
        do {
                wqe = bfa_q_first(&mod->sgpg_wait_q);
                /* give the waiter everything we have, up to what it needs */
                if (mod->free_sgpgs < wqe->nsgpg)
                        nsgpg = mod->free_sgpgs;
                else
                        nsgpg = wqe->nsgpg;
                bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
                wqe->nsgpg -= nsgpg;
                if (wqe->nsgpg == 0) {
                        /* request fully satisfied: complete the waiter */
                        list_del(&wqe->qe);
                        wqe->cbfn(wqe->cbarg);
                }
        } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
5418
/*
 * Queue a wait element for 'nsgpg' scatter-gather pages. Intended to be
 * called only when the free pool cannot satisfy the request (the second
 * WARN_ON fires if enough pages were actually available). Any pages that
 * are free right now are handed to this waiter immediately; the remainder
 * is delivered later by bfa_sgpg_mfree() via wqe->cbfn.
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
        struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

        WARN_ON(nsgpg <= 0);
        WARN_ON(nsgpg <= mod->free_sgpgs);

        wqe->nsgpg_total = wqe->nsgpg = nsgpg;

        /*
         * allocate any left to this one first
         */
        if (mod->free_sgpgs) {
                /*
                 * no one else is waiting for SGPG
                 */
                WARN_ON(!list_empty(&mod->sgpg_wait_q));
                /* drain the whole free pool into this waiter */
                list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
                wqe->nsgpg -= mod->free_sgpgs;
                mod->free_sgpgs = 0;
        }

        list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
5444
/*
 * Cancel a pending SGPG wait element. Any pages already handed to the
 * waiter (nsgpg_total - nsgpg) are returned to the module free pool.
 */
void
bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
{
        struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

        WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
        list_del(&wqe->qe);

        if (wqe->nsgpg_total != wqe->nsgpg)
                bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
                                   wqe->nsgpg_total - wqe->nsgpg);
}
5457
5458void
5459bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5460                   void *cbarg)
5461{
5462        INIT_LIST_HEAD(&wqe->sgpg_q);
5463        wqe->cbfn = cbfn;
5464        wqe->cbarg = cbarg;
5465}
5466
5467/*
5468 *  UF related functions
5469 */
5470/*
5471 *****************************************************************************
5472 * Internal functions
5473 *****************************************************************************
5474 */
5475static void
5476__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5477{
5478        struct bfa_uf_s   *uf = cbarg;
5479        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5480
5481        if (complete)
5482                ufm->ufrecv(ufm->cbarg, uf);
5483}
5484
5485static void
5486claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
5487{
5488        struct bfi_uf_buf_post_s *uf_bp_msg;
5489        u16 i;
5490        u16 buf_len;
5491
5492        ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
5493        uf_bp_msg = ufm->uf_buf_posts;
5494
5495        for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5496             i++, uf_bp_msg++) {
5497                memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5498
5499                uf_bp_msg->buf_tag = i;
5500                buf_len = sizeof(struct bfa_uf_buf_s);
5501                uf_bp_msg->buf_len = cpu_to_be16(buf_len);
5502                bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5503                            bfa_fn_lpu(ufm->bfa));
5504                bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
5505        }
5506
5507        /*
5508         * advance pointer beyond consumed memory
5509         */
5510        bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
5511}
5512
5513static void
5514claim_ufs(struct bfa_uf_mod_s *ufm)
5515{
5516        u16 i;
5517        struct bfa_uf_s   *uf;
5518
5519        /*
5520         * Claim block of memory for UF list
5521         */
5522        ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);
5523
5524        /*
5525         * Initialize UFs and queue it in UF free queue
5526         */
5527        for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
5528                memset(uf, 0, sizeof(struct bfa_uf_s));
5529                uf->bfa = ufm->bfa;
5530                uf->uf_tag = i;
5531                uf->pb_len = BFA_PER_UF_DMA_SZ;
5532                uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
5533                uf->buf_pa = ufm_pbs_pa(ufm, i);
5534                list_add_tail(&uf->qe, &ufm->uf_free_q);
5535        }
5536
5537        /*
5538         * advance memory pointer
5539         */
5540        bfa_mem_kva_curp(ufm) = (u8 *) uf;
5541}
5542
/* Claim all UF module memory: descriptor array first, then the pre-built
 * BUF_POST messages (both advance the same KVA cursor, so order matters). */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
        claim_ufs(ufm);
        claim_uf_post_msgs(ufm);
}
5549
5550static void
5551bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
5552                struct bfa_s *bfa)
5553{
5554        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5555        struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
5556        u32     num_ufs = cfg->fwcfg.num_uf_bufs;
5557        struct bfa_mem_dma_s *seg_ptr;
5558        u16     nsegs, idx, per_seg_uf = 0;
5559
5560        nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
5561        per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);
5562
5563        bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
5564                if (num_ufs >= per_seg_uf) {
5565                        num_ufs -= per_seg_uf;
5566                        bfa_mem_dma_setup(minfo, seg_ptr,
5567                                per_seg_uf * BFA_PER_UF_DMA_SZ);
5568                } else
5569                        bfa_mem_dma_setup(minfo, seg_ptr,
5570                                num_ufs * BFA_PER_UF_DMA_SZ);
5571        }
5572
5573        /* kva memory */
5574        bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
5575                (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
5576}
5577
/*
 * UF module attach: record configuration, initialize the free/posted/unused
 * queues, and claim the module's KVA memory.
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_pcidev_s *pcidev)
{
        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

        ufm->bfa = bfa;
        ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
        INIT_LIST_HEAD(&ufm->uf_free_q);
        INIT_LIST_HEAD(&ufm->uf_posted_q);
        INIT_LIST_HEAD(&ufm->uf_unused_q);

        uf_mem_claim(ufm);
}
5592
/* UF module detach hook — nothing to tear down for this module. */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
5597
5598static struct bfa_uf_s *
5599bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
5600{
5601        struct bfa_uf_s   *uf;
5602
5603        bfa_q_deq(&uf_mod->uf_free_q, &uf);
5604        return uf;
5605}
5606
/* Return a UF to the tail of the free queue. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
        list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
5612
5613static bfa_status_t
5614bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
5615{
5616        struct bfi_uf_buf_post_s *uf_post_msg;
5617
5618        uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
5619        if (!uf_post_msg)
5620                return BFA_STATUS_FAILED;
5621
5622        memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
5623                      sizeof(struct bfi_uf_buf_post_s));
5624        bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);
5625
5626        bfa_trc(ufm->bfa, uf->uf_tag);
5627
5628        list_add_tail(&uf->qe, &ufm->uf_posted_q);
5629        return BFA_STATUS_OK;
5630}
5631
5632static void
5633bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5634{
5635        struct bfa_uf_s   *uf;
5636
5637        while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5638                if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5639                        break;
5640        }
5641}
5642
5643static void
5644uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5645{
5646        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5647        u16 uf_tag = m->buf_tag;
5648        struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
5649        struct bfa_uf_buf_s *uf_buf;
5650        uint8_t *buf;
5651        struct fchs_s *fchs;
5652
5653        uf_buf = (struct bfa_uf_buf_s *)
5654                        bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
5655        buf = &uf_buf->d[0];
5656
5657        m->frm_len = be16_to_cpu(m->frm_len);
5658        m->xfr_len = be16_to_cpu(m->xfr_len);
5659
5660        fchs = (struct fchs_s *)uf_buf;
5661
5662        list_del(&uf->qe);      /* dequeue from posted queue */
5663
5664        uf->data_ptr = buf;
5665        uf->data_len = m->xfr_len;
5666
5667        WARN_ON(uf->data_len < sizeof(struct fchs_s));
5668
5669        if (uf->data_len == sizeof(struct fchs_s)) {
5670                bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5671                               uf->data_len, (struct fchs_s *)buf);
5672        } else {
5673                u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5674                bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5675                                      BFA_PL_EID_RX, uf->data_len,
5676                                      (struct fchs_s *)buf, pld_w0);
5677        }
5678
5679        if (bfa->fcs)
5680                __bfa_cb_uf_recv(uf, BFA_TRUE);
5681        else
5682                bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5683}
5684
/* UF module stop hook — no work required at stop time. */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
5689
/*
 * IOC-disable handling for the UF module: reclaim all UF buffers. Unused
 * UFs go back to the free queue, and every posted UF is pulled off the
 * posted queue and freed (firmware will not complete them anymore).
 */
static void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
        struct bfa_uf_s *uf;
        struct list_head *qe, *qen;

        /* Enqueue unused uf resources to free_q */
        list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);

        list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
                uf = (struct bfa_uf_s *) qe;
                list_del(&uf->qe);
                bfa_uf_put(ufm, uf);
        }
}
5706
/* UF module start hook: post all free UF buffers to firmware. */
static void
bfa_uf_start(struct bfa_s *bfa)
{
        bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5712
5713/*
5714 * Register handler for all unsolicted receive frames.
5715 *
5716 * @param[in]   bfa             BFA instance
5717 * @param[in]   ufrecv  receive handler function
5718 * @param[in]   cbarg   receive handler arg
5719 */
void
bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
{
        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

        /* single registered handler; a later call replaces the previous one */
        ufm->ufrecv = ufrecv;
        ufm->cbarg = cbarg;
}
5728
5729/*
5730 *      Free an unsolicited frame back to BFA.
5731 *
5732 * @param[in]           uf              unsolicited frame to be freed
5733 *
5734 * @return None
5735 */
void
bfa_uf_free(struct bfa_uf_s *uf)
{
        /* return the buffer, then re-post everything free back to firmware */
        bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
        bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
}
5742
5743
5744
5745/*
5746 *  uf_pub BFA uf module public functions
5747 */
/*
 * UF interrupt dispatcher: the only expected firmware message is
 * FRM_RCVD; anything else is traced and flagged.
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
        bfa_trc(bfa, msg->mhdr.msg_id);

        switch (msg->mhdr.msg_id) {
        case BFI_UF_I2H_FRM_RCVD:
                uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
                break;

        default:
                bfa_trc(bfa, msg->mhdr.msg_id);
                WARN_ON(1);
        }
}
5763
5764void
5765bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5766{
5767        struct bfa_uf_mod_s     *mod = BFA_UF_MOD(bfa);
5768        struct list_head        *qe;
5769        int     i;
5770
5771        for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
5772                bfa_q_deq_tail(&mod->uf_free_q, &qe);
5773                list_add_tail(qe, &mod->uf_unused_q);
5774        }
5775}
5776
5777/*
5778 *      Dport forward declaration
5779 */
5780
5781enum bfa_dport_test_state_e {
5782        BFA_DPORT_ST_DISABLED   = 0,    /*!< dport is disabled */
5783        BFA_DPORT_ST_INP        = 1,    /*!< test in progress */
5784        BFA_DPORT_ST_COMP       = 2,    /*!< test complete successfully */
5785        BFA_DPORT_ST_NO_SFP     = 3,    /*!< sfp is not present */
5786        BFA_DPORT_ST_NOTSTART   = 4,    /*!< test not start dport is enabled */
5787};
5788
5789/*
5790 * BFA DPORT state machine events
5791 */
5792enum bfa_dport_sm_event {
5793        BFA_DPORT_SM_ENABLE     = 1,    /* dport enable event         */
5794        BFA_DPORT_SM_DISABLE    = 2,    /* dport disable event        */
5795        BFA_DPORT_SM_FWRSP      = 3,    /* fw enable/disable rsp      */
5796        BFA_DPORT_SM_QRESUME    = 4,    /* CQ space available         */
5797        BFA_DPORT_SM_HWFAIL     = 5,    /* IOC h/w failure            */
5798        BFA_DPORT_SM_START      = 6,    /* re-start dport test        */
5799        BFA_DPORT_SM_REQFAIL    = 7,    /* request failure            */
5800        BFA_DPORT_SM_SCN        = 8,    /* state change notify frm fw */
5801};
5802
5803static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
5804                                  enum bfa_dport_sm_event event);
5805static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
5806                                  enum bfa_dport_sm_event event);
5807static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
5808                                  enum bfa_dport_sm_event event);
5809static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
5810                                 enum bfa_dport_sm_event event);
5811static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
5812                                 enum bfa_dport_sm_event event);
5813static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
5814                                   enum bfa_dport_sm_event event);
5815static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
5816                                        enum bfa_dport_sm_event event);
5817static void bfa_dport_sm_starting(struct bfa_dport_s *dport,
5818                                  enum bfa_dport_sm_event event);
5819static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
5820                                   enum bfa_dport_sm_event event);
5821static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
5822                                   enum bfa_dport_sm_event event);
5823static void bfa_dport_qresume(void *cbarg);
5824static void bfa_dport_req_comp(struct bfa_dport_s *dport,
5825                                struct bfi_diag_dport_rsp_s *msg);
5826static void bfa_dport_scn(struct bfa_dport_s *dport,
5827                                struct bfi_diag_dport_scn_s *msg);
5828
5829/*
5830 *      BFA fcdiag module
5831 */
5832#define BFA_DIAG_QTEST_TOV      1000    /* msec */
5833
5834/*
5835 *      Set port status to busy
5836 */
5837static void
5838bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5839{
5840        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5841
5842        if (fcdiag->lb.lock)
5843                fcport->diag_busy = BFA_TRUE;
5844        else
5845                fcport->diag_busy = BFA_FALSE;
5846}
5847
/* FCDIAG module meminfo hook — this module claims no memory of its own. */
static void
bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
                struct bfa_s *bfa)
{
}
5853
/*
 * FCDIAG module attach: wire up trace context and initialize the embedded
 * dport test state (disabled, no pending callback, cleared results).
 */
static void
bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
        struct bfa_dport_s  *dport = &fcdiag->dport;

        fcdiag->bfa             = bfa;
        fcdiag->trcmod  = bfa->trcmod;
        /* The common DIAG attach bfa_diag_attach() will do all memory claim */
        dport->bfa = bfa;
        bfa_sm_set_state(dport, bfa_dport_sm_disabled);
        bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
        dport->cbfn = NULL;
        dport->cbarg = NULL;
        dport->test_state = BFA_DPORT_ST_DISABLED;
        memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s));
}
5872
/*
 * IOC-disable handling for FCDIAG: fail any loopback test in progress
 * (complete its callback with IOC_FAILURE and drop the lock), then inform
 * the dport state machine of the hardware failure.
 */
static void
bfa_fcdiag_iocdisable(struct bfa_s *bfa)
{
        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
        struct bfa_dport_s *dport = &fcdiag->dport;

        bfa_trc(fcdiag, fcdiag->lb.lock);
        if (fcdiag->lb.lock) {
                fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
                fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
                fcdiag->lb.lock = 0;
                bfa_fcdiag_set_busy_status(fcdiag);
        }

        bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
}
5889
/* FCDIAG module detach hook — nothing to tear down for this module. */
static void
bfa_fcdiag_detach(struct bfa_s *bfa)
{
}
5894
/* FCDIAG module start hook — no work required at start time. */
static void
bfa_fcdiag_start(struct bfa_s *bfa)
{
}
5899
/* FCDIAG module stop hook — no work required at stop time. */
static void
bfa_fcdiag_stop(struct bfa_s *bfa)
{
}
5904
/*
 * Queue-test timer expiry: firmware never answered the QTEST request.
 * Record an ETIMER result (with how many iterations completed), invoke the
 * caller's completion callback, and release the queue-test lock.
 */
static void
bfa_fcdiag_queuetest_timeout(void *cbarg)
{
        struct bfa_fcdiag_s       *fcdiag = cbarg;
        struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;

        bfa_trc(fcdiag, fcdiag->qtest.all);
        bfa_trc(fcdiag, fcdiag->qtest.count);

        fcdiag->qtest.timer_active = 0;

        res->status = BFA_STATUS_ETIMER;
        /* count reflects iterations that did complete before the timeout */
        res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
        if (fcdiag->qtest.all)
                res->queue  = fcdiag->qtest.all;

        bfa_trc(fcdiag, BFA_STATUS_ETIMER);
        fcdiag->qtest.status = BFA_STATUS_ETIMER;
        fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
        fcdiag->qtest.lock = 0;
}
5926
5927static bfa_status_t
5928bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
5929{
5930        u32     i;
5931        struct bfi_diag_qtest_req_s *req;
5932
5933        req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
5934        if (!req)
5935                return BFA_STATUS_DEVBUSY;
5936
5937        /* build host command */
5938        bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
5939                bfa_fn_lpu(fcdiag->bfa));
5940
5941        for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
5942                req->data[i] = QTEST_PAT_DEFAULT;
5943
5944        bfa_trc(fcdiag, fcdiag->qtest.queue);
5945        /* ring door bell */
5946        bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
5947        return BFA_STATUS_OK;
5948}
5949
/*
 * QTEST response handler. Verifies the echoed payload (firmware returns
 * the bitwise complement of the pattern), then either sends the next
 * iteration, advances to the next queue in all-queues mode, or finishes:
 * stop the timer, fill in the result, invoke the caller's callback, and
 * drop the lock. A response arriving after the timer already fired is
 * ignored.
 */
static void
bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
                        bfi_diag_qtest_rsp_t *rsp)
{
        struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
        bfa_status_t status = BFA_STATUS_OK;
        int i;

        /* Check timer, should still be active   */
        if (!fcdiag->qtest.timer_active) {
                bfa_trc(fcdiag, fcdiag->qtest.timer_active);
                return;
        }

        /* update count */
        fcdiag->qtest.count--;

        /* Check result: each word must be the complement of the pattern */
        for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
                if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
                        res->status = BFA_STATUS_DATACORRUPTED;
                        break;
                }
        }

        if (res->status == BFA_STATUS_OK) {
                if (fcdiag->qtest.count > 0) {
                        /* more iterations remain on this queue */
                        status = bfa_fcdiag_queuetest_send(fcdiag);
                        if (status == BFA_STATUS_OK)
                                return;
                        else
                                res->status = status;
                } else if (fcdiag->qtest.all > 0 &&
                        fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
                        /* all-queues mode: restart the count on the next CQ */
                        fcdiag->qtest.count = QTEST_CNT_DEFAULT;
                        fcdiag->qtest.queue++;
                        status = bfa_fcdiag_queuetest_send(fcdiag);
                        if (status == BFA_STATUS_OK)
                                return;
                        else
                                res->status = status;
                }
        }

        /* Stop timer when we comp all queue */
        if (fcdiag->qtest.timer_active) {
                bfa_timer_stop(&fcdiag->qtest.timer);
                fcdiag->qtest.timer_active = 0;
        }
        res->queue = fcdiag->qtest.queue;
        res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
        bfa_trc(fcdiag, res->count);
        bfa_trc(fcdiag, res->status);
        fcdiag->qtest.status = res->status;
        fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
        fcdiag->qtest.lock = 0;
}
6007
/*
 * Loopback response handler: copy the (big-endian) frame counters and
 * status from the firmware response into the caller's result structure,
 * complete the caller's callback, drop the loopback lock, and clear the
 * port's diag-busy flag.
 */
static void
bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
                        struct bfi_diag_lb_rsp_s *rsp)
{
        struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;

        res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
        res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
        res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
        res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
        res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
        res->status     = rsp->res.status;
        fcdiag->lb.status = rsp->res.status;
        bfa_trc(fcdiag, fcdiag->lb.status);
        fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
        fcdiag->lb.lock = 0;
        bfa_fcdiag_set_busy_status(fcdiag);
}
6026
6027static bfa_status_t
6028bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
6029                        struct bfa_diag_loopback_s *loopback)
6030{
6031        struct bfi_diag_lb_req_s *lb_req;
6032
6033        lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
6034        if (!lb_req)
6035                return BFA_STATUS_DEVBUSY;
6036
6037        /* build host command */
6038        bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
6039                bfa_fn_lpu(fcdiag->bfa));
6040
6041        lb_req->lb_mode = loopback->lb_mode;
6042        lb_req->speed = loopback->speed;
6043        lb_req->loopcnt = loopback->loopcnt;
6044        lb_req->pattern = loopback->pattern;
6045
6046        /* ring door bell */
6047        bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
6048
6049        bfa_trc(fcdiag, loopback->lb_mode);
6050        bfa_trc(fcdiag, loopback->speed);
6051        bfa_trc(fcdiag, loopback->loopcnt);
6052        bfa_trc(fcdiag, loopback->pattern);
6053        return BFA_STATUS_OK;
6054}
6055
6056/*
6057 *      cpe/rme intr handler
6058 */
void
bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

        /* dispatch diag firmware responses to the owning sub-handler */
        switch (msg->mhdr.msg_id) {
        case BFI_DIAG_I2H_LOOPBACK:
                bfa_fcdiag_loopback_comp(fcdiag,
                                (struct bfi_diag_lb_rsp_s *) msg);
                break;
        case BFI_DIAG_I2H_QTEST:
                bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
                break;
        case BFI_DIAG_I2H_DPORT:
                bfa_dport_req_comp(&fcdiag->dport,
                                (struct bfi_diag_dport_rsp_s *)msg);
                break;
        case BFI_DIAG_I2H_DPORT_SCN:
                bfa_dport_scn(&fcdiag->dport,
                                (struct bfi_diag_dport_scn_s *)msg);
                break;
        default:
                /* unexpected message id from firmware */
                bfa_trc(fcdiag, msg->mhdr.msg_id);
                WARN_ON(1);
        }
}
6085
6086/*
6087 *      Loopback test
6088 *
6089 *   @param[in] *bfa            - bfa data struct
6090 *   @param[in] opmode          - port operation mode
6091 *   @param[in] speed           - port speed
6092 *   @param[in] lpcnt           - loop count
6093 *   @param[in] pat                     - pattern to build packet
6094 *   @param[in] *result         - pt to bfa_diag_loopback_result_t data struct
6095 *   @param[in] cbfn            - callback function
6096 *   @param[in] cbarg           - callback functioin arg
6097 *
6098 *   @param[out]
6099 */
bfa_status_t
bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
                enum bfa_port_speed speed, u32 lpcnt, u32 pat,
                struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
                void *cbarg)
{
        struct  bfa_diag_loopback_s loopback;
        struct bfa_port_attr_s attr;
        bfa_status_t status;
        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

        if (!bfa_iocfc_is_operational(bfa))
                return BFA_STATUS_IOC_NON_OP;

        /* if port is PBC disabled, return error */
        if (bfa_fcport_is_pbcdisabled(bfa)) {
                bfa_trc(fcdiag, BFA_STATUS_PBC);
                return BFA_STATUS_PBC;
        }

        /* loopback is destructive: the port must be administratively down */
        if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
                bfa_trc(fcdiag, opmode);
                return BFA_STATUS_PORT_NOT_DISABLED;
        }

        /*
         * Check if input speed is supported by the port mode
         */
        if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
                if (!(speed == BFA_PORT_SPEED_1GBPS ||
                      speed == BFA_PORT_SPEED_2GBPS ||
                      speed == BFA_PORT_SPEED_4GBPS ||
                      speed == BFA_PORT_SPEED_8GBPS ||
                      speed == BFA_PORT_SPEED_16GBPS ||
                      speed == BFA_PORT_SPEED_AUTO)) {
                        bfa_trc(fcdiag, speed);
                        return BFA_STATUS_UNSUPP_SPEED;
                }
                /* requested speed must not exceed the port's capability */
                bfa_fcport_get_attr(bfa, &attr);
                bfa_trc(fcdiag, attr.speed_supported);
                if (speed > attr.speed_supported)
                        return BFA_STATUS_UNSUPP_SPEED;
        } else {
                /* non-FC IOC types accept only 10G */
                if (speed != BFA_PORT_SPEED_10GBPS) {
                        bfa_trc(fcdiag, speed);
                        return BFA_STATUS_UNSUPP_SPEED;
                }
        }

        /*
         * For CT2, 1G is not supported
         */
        if ((speed == BFA_PORT_SPEED_1GBPS) &&
            (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
                bfa_trc(fcdiag, speed);
                return BFA_STATUS_UNSUPP_SPEED;
        }

        /* For Mezz card, port speed entered needs to be checked */
        if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
                if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
                        if (!(speed == BFA_PORT_SPEED_1GBPS ||
                              speed == BFA_PORT_SPEED_2GBPS ||
                              speed == BFA_PORT_SPEED_4GBPS ||
                              speed == BFA_PORT_SPEED_8GBPS ||
                              speed == BFA_PORT_SPEED_16GBPS ||
                              speed == BFA_PORT_SPEED_AUTO))
                                return BFA_STATUS_UNSUPP_SPEED;
                } else {
                        if (speed != BFA_PORT_SPEED_10GBPS)
                                return BFA_STATUS_UNSUPP_SPEED;
                }
        }
        /* check to see if fcport is dport */
        if (bfa_fcport_is_dport(bfa)) {
                bfa_trc(fcdiag, fcdiag->lb.lock);
                return BFA_STATUS_DPORT_ENABLED;
        }
        /* check to see if there is another destructive diag cmd running */
        if (fcdiag->lb.lock) {
                bfa_trc(fcdiag, fcdiag->lb.lock);
                return BFA_STATUS_DEVBUSY;
        }

        /* all checks passed: take the lock and record the test parameters */
        fcdiag->lb.lock = 1;
        loopback.lb_mode = opmode;
        loopback.speed = speed;
        loopback.loopcnt = lpcnt;
        loopback.pattern = pat;
        fcdiag->lb.result = result;
        fcdiag->lb.cbfn = cbfn;
        fcdiag->lb.cbarg = cbarg;
        memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
        bfa_fcdiag_set_busy_status(fcdiag);

        /* Send msg to fw */
        status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
        return status;
}
6199
6200/*
6201 *      DIAG queue test command
6202 *
6203 *   @param[in] *bfa            - bfa data struct
6204 *   @param[in] force           - 1: don't do ioc op checking
6205 *   @param[in] queue           - queue no. to test
6206 *   @param[in] *result         - pt to bfa_diag_qtest_result_t data struct
6207 *   @param[in] cbfn            - callback function
6208 *   @param[in] *cbarg          - callback functioin arg
6209 *
6210 *   @param[out]
6211 */
bfa_status_t
bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
                struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
                void *cbarg)
{
        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
        bfa_status_t status;
        bfa_trc(fcdiag, force);
        bfa_trc(fcdiag, queue);

        if (!force && !bfa_iocfc_is_operational(bfa))
                return BFA_STATUS_IOC_NON_OP;

        /* check to see if there is another destructive diag cmd running */
        if (fcdiag->qtest.lock) {
                bfa_trc(fcdiag, fcdiag->qtest.lock);
                return BFA_STATUS_DEVBUSY;
        }

        /* Initialization */
        fcdiag->qtest.lock = 1;
        fcdiag->qtest.cbfn = cbfn;
        fcdiag->qtest.cbarg = cbarg;
        fcdiag->qtest.result = result;
        fcdiag->qtest.count = QTEST_CNT_DEFAULT;

        /* Init test results */
        fcdiag->qtest.result->status = BFA_STATUS_OK;
        fcdiag->qtest.result->count  = 0;

        /* send: out-of-range queue number means "test all queues" */
        if (queue < BFI_IOC_MAX_CQS) {
                fcdiag->qtest.result->queue  = (u8)queue;
                fcdiag->qtest.queue = (u8)queue;
                fcdiag->qtest.all   = 0;
        } else {
                fcdiag->qtest.result->queue  = 0;
                fcdiag->qtest.queue = 0;
                fcdiag->qtest.all   = 1;
        }
        status = bfa_fcdiag_queuetest_send(fcdiag);

        /* Start a timer to catch a firmware response that never arrives */
        if (status == BFA_STATUS_OK) {
                bfa_timer_start(bfa, &fcdiag->qtest.timer,
                                bfa_fcdiag_queuetest_timeout, fcdiag,
                                BFA_DIAG_QTEST_TOV);
                fcdiag->qtest.timer_active = 1;
        }
        return status;
}
6263
6264/*
6265 * DIAG PLB is running
6266 *
6267 *   @param[in] *bfa    - bfa data struct
6268 *
6269 *   @param[out]
6270 */
6271bfa_status_t
6272bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
6273{
6274        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6275        return fcdiag->lb.lock ?  BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
6276}
6277
6278/*
6279 *      D-port
6280 */
6281#define bfa_dport_result_start(__dport, __mode) do {                    \
6282                (__dport)->result.start_time = bfa_get_log_time();      \
6283                (__dport)->result.status = DPORT_TEST_ST_INPRG;         \
6284                (__dport)->result.mode = (__mode);                      \
6285                (__dport)->result.rp_pwwn = (__dport)->rp_pwwn;         \
6286                (__dport)->result.rp_nwwn = (__dport)->rp_nwwn;         \
6287                (__dport)->result.lpcnt = (__dport)->lpcnt;             \
6288} while (0)
6289
6290static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
6291                                        enum bfi_dport_req req);
6292static void
6293bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
6294{
6295        if (dport->cbfn != NULL) {
6296                dport->cbfn(dport->cbarg, bfa_status);
6297                dport->cbfn = NULL;
6298                dport->cbarg = NULL;
6299        }
6300}
6301
/*
 * Disabled state: D-port test mode is off.
 *
 * ENABLE kicks off the firmware enable handshake (parking in qwait if
 * the request queue is full).  An SCN carrying DDPORT_ENABLE means the
 * attached switch put the port into dynamic D-port mode, in which case
 * we jump straight to the enabled state.
 */
static void
bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_ENABLE:
		bfa_fcport_dportenable(dport->bfa);
		/* request queued vs. deferred on queue-full */
		if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		/* Already disabled */
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* ignore */
		break;

	case BFA_DPORT_SM_SCN:
		if (dport->i2hmsg.scn.state ==  BFI_DPORT_SCN_DDPORT_ENABLE) {
			/* switch-initiated (dynamic) D-port enable */
			bfa_fcport_ddportenable(dport->bfa);
			dport->dynamic = BFA_TRUE;
			dport->test_state = BFA_DPORT_ST_NOTSTART;
			bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		} else {
			/* no other SCN is expected while disabled */
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			WARN_ON(1);
		}
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6340
6341static void
6342bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
6343                            enum bfa_dport_sm_event event)
6344{
6345        bfa_trc(dport->bfa, event);
6346
6347        switch (event) {
6348        case BFA_DPORT_SM_QRESUME:
6349                bfa_sm_set_state(dport, bfa_dport_sm_enabling);
6350                bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
6351                break;
6352
6353        case BFA_DPORT_SM_HWFAIL:
6354                bfa_reqq_wcancel(&dport->reqq_wait);
6355                bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6356                bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6357                break;
6358
6359        default:
6360                bfa_sm_fault(dport->bfa, event);
6361        }
6362}
6363
/*
 * Enable request sent to firmware; waiting for the response.
 * A good response moves to enabled (with the result record reset);
 * a request failure disables D-port mode again.
 */
static void
bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		/* start each enable with a clean result record */
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			dport->test_state = BFA_DPORT_ST_INP;
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO);
		}
		/* enabled even without an SFP; test just cannot run */
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_REQFAIL:
		dport->test_state = BFA_DPORT_ST_DISABLED;
		bfa_fcport_dportdisable(dport->bfa);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6397
/*
 * Enabled state: D-port mode is active.  Tests may be (re)started,
 * the mode may be disabled, and firmware SCNs track test progress.
 */
static void
bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_START:
		if (bfa_dport_send_req(dport, BFI_DPORT_START))
			bfa_sm_set_state(dport, bfa_dport_sm_starting);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		bfa_fcport_dportdisable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_disabling);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_SCN:
		/* test progress notifications update test_state only */
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_TESTCOMP:
			dport->test_state = BFA_DPORT_ST_COMP;
			break;

		case BFI_DPORT_SCN_TESTSTART:
			dport->test_state = BFA_DPORT_ST_INP;
			break;

		case BFI_DPORT_SCN_TESTSKIP:
		case BFI_DPORT_SCN_SUBTESTSTART:
			/* no state change */
			break;

		case BFI_DPORT_SCN_SFP_REMOVED:
			dport->test_state = BFA_DPORT_ST_NO_SFP;
			break;

		case BFI_DPORT_SCN_DDPORT_DISABLE:
			/* switch ended dynamic D-port: tell firmware */
			bfa_fcport_ddportdisable(dport->bfa);

			if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE))
				bfa_sm_set_state(dport,
					 bfa_dport_sm_dynamic_disabling);
			else
				bfa_sm_set_state(dport,
					 bfa_dport_sm_dynamic_disabling_qwait);
			break;

		case BFI_DPORT_SCN_FCPORT_DISABLE:
			/* base FC port was disabled underneath us */
			bfa_fcport_ddportdisable(dport->bfa);

			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			break;

		default:
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);
		}
		break;
	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6469
6470static void
6471bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
6472                             enum bfa_dport_sm_event event)
6473{
6474        bfa_trc(dport->bfa, event);
6475
6476        switch (event) {
6477        case BFA_DPORT_SM_QRESUME:
6478                bfa_sm_set_state(dport, bfa_dport_sm_disabling);
6479                bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
6480                break;
6481
6482        case BFA_DPORT_SM_HWFAIL:
6483                bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6484                bfa_reqq_wcancel(&dport->reqq_wait);
6485                bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6486                break;
6487
6488        case BFA_DPORT_SM_SCN:
6489                /* ignore */
6490                break;
6491
6492        default:
6493                bfa_sm_fault(dport->bfa, event);
6494        }
6495}
6496
6497static void
6498bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6499{
6500        bfa_trc(dport->bfa, event);
6501
6502        switch (event) {
6503        case BFA_DPORT_SM_FWRSP:
6504                dport->test_state = BFA_DPORT_ST_DISABLED;
6505                bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6506                break;
6507
6508        case BFA_DPORT_SM_HWFAIL:
6509                bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6510                bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6511                break;
6512
6513        case BFA_DPORT_SM_SCN:
6514                /* no state change */
6515                break;
6516
6517        default:
6518                bfa_sm_fault(dport->bfa, event);
6519        }
6520}
6521
6522static void
6523bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
6524                            enum bfa_dport_sm_event event)
6525{
6526        bfa_trc(dport->bfa, event);
6527
6528        switch (event) {
6529        case BFA_DPORT_SM_QRESUME:
6530                bfa_sm_set_state(dport, bfa_dport_sm_starting);
6531                bfa_dport_send_req(dport, BFI_DPORT_START);
6532                break;
6533
6534        case BFA_DPORT_SM_HWFAIL:
6535                bfa_reqq_wcancel(&dport->reqq_wait);
6536                bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6537                bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6538                break;
6539
6540        default:
6541                bfa_sm_fault(dport->bfa, event);
6542        }
6543}
6544
/*
 * Start request sent to firmware; waiting for the response.
 * Both a response and a request failure land back in the enabled
 * state; on a good response the result record is re-initialized first.
 */
static void
bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		/* fresh result record for the restarted test */
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			dport->test_state = BFA_DPORT_ST_INP;
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
		}
		/* fall thru */

	case BFA_DPORT_SM_REQFAIL:
		/* intentional fall-through target: return to enabled */
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6575
/*
 * Dynamic (switch-initiated) D-port disable request sent to firmware;
 * waiting for the DDPORT_DISABLED notification before re-enabling the
 * regular FC port.
 */
static void
bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
			       enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_SCN:
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_DDPORT_DISABLED:
			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			/* dynamic mode over: bring the FC port back up */
			bfa_fcport_enable(dport->bfa);
			break;

		default:
			/* no other SCN expected in this state */
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);

		}
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6607
6608static void
6609bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
6610                            enum bfa_dport_sm_event event)
6611{
6612        bfa_trc(dport->bfa, event);
6613
6614        switch (event) {
6615        case BFA_DPORT_SM_QRESUME:
6616                bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling);
6617                bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE);
6618                break;
6619
6620        case BFA_DPORT_SM_HWFAIL:
6621                bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6622                bfa_reqq_wcancel(&dport->reqq_wait);
6623                bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6624                break;
6625
6626        case BFA_DPORT_SM_SCN:
6627                /* ignore */
6628                break;
6629
6630        default:
6631                bfa_sm_fault(dport->bfa, event);
6632        }
6633}
6634
6635static bfa_boolean_t
6636bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
6637{
6638        struct bfi_diag_dport_req_s *m;
6639
6640        /*
6641         * check for room in queue to send request now
6642         */
6643        m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
6644        if (!m) {
6645                bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait);
6646                return BFA_FALSE;
6647        }
6648
6649        bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
6650                    bfa_fn_lpu(dport->bfa));
6651        m->req  = req;
6652        if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) {
6653                m->lpcnt = cpu_to_be32(dport->lpcnt);
6654                m->payload = cpu_to_be32(dport->payload);
6655        }
6656
6657        /*
6658         * queue I/O message to firmware
6659         */
6660        bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
6661
6662        return BFA_TRUE;
6663}
6664
6665static void
6666bfa_dport_qresume(void *cbarg)
6667{
6668        struct bfa_dport_s *dport = cbarg;
6669
6670        bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
6671}
6672
/*
 * Handle the firmware response to a D-port enable/disable/start request.
 * Caches status and remote-port WWNs, drives the SM with FWRSP on
 * success (or "no SFP") and REQFAIL otherwise, then completes the
 * caller's diag callback.
 */
static void
bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg)
{
	/*
	 * NOTE(review): inbound firmware status is converted with
	 * cpu_to_be32(); the byte swap is identical to be32_to_cpu() on
	 * both LE and BE hosts, but be32_to_cpu() would state the
	 * firmware-to-host direction -- confirm intent.
	 */
	msg->status = cpu_to_be32(msg->status);
	dport->i2hmsg.rsp.status = msg->status;
	dport->rp_pwwn = msg->pwwn;
	dport->rp_nwwn = msg->nwwn;

	if ((msg->status == BFA_STATUS_OK) ||
	    (msg->status == BFA_STATUS_DPORT_NO_SFP)) {
		bfa_trc(dport->bfa, msg->status);
		bfa_trc(dport->bfa, dport->rp_pwwn);
		bfa_trc(dport->bfa, dport->rp_nwwn);
		bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);

	} else {
		bfa_trc(dport->bfa, msg->status);
		bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL);
	}
	/* complete the user callback with the raw firmware status */
	bfa_cb_fcdiag_dport(dport, msg->status);
}
6694
6695static bfa_boolean_t
6696bfa_dport_is_sending_req(struct bfa_dport_s *dport)
6697{
6698        if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling)      ||
6699            bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6700            bfa_sm_cmp_state(dport, bfa_dport_sm_disabling)     ||
6701            bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) ||
6702            bfa_sm_cmp_state(dport, bfa_dport_sm_starting)      ||
6703            bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) {
6704                return BFA_TRUE;
6705        } else {
6706                return BFA_FALSE;
6707        }
6708}
6709
/*
 * Process a D-port state change notification (SCN) from firmware:
 * update the cached result record according to the event type, then
 * feed the SCN into the state machine.
 */
static void
bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
{
	int i;
	uint8_t subtesttype;

	bfa_trc(dport->bfa, msg->state);
	dport->i2hmsg.scn.state = msg->state;

	switch (dport->i2hmsg.scn.state) {
	case BFI_DPORT_SCN_TESTCOMP:
		/* test finished: capture final and per-subtest results */
		dport->result.end_time = bfa_get_log_time();
		bfa_trc(dport->bfa, dport->result.end_time);

		dport->result.status = msg->info.testcomp.status;
		bfa_trc(dport->bfa, dport->result.status);

		/*
		 * NOTE(review): the 32-bit fields below use cpu_to_be32()
		 * while the 16-bit ones use be16_to_cpu(); the byte swap
		 * is the same either way, but be32_to_cpu() would express
		 * the firmware-to-host direction consistently -- confirm.
		 */
		dport->result.roundtrip_latency =
			cpu_to_be32(msg->info.testcomp.latency);
		dport->result.est_cable_distance =
			cpu_to_be32(msg->info.testcomp.distance);
		dport->result.buffer_required =
			be16_to_cpu(msg->info.testcomp.numbuffer);

		dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz);
		dport->result.speed = msg->info.testcomp.speed;

		bfa_trc(dport->bfa, dport->result.roundtrip_latency);
		bfa_trc(dport->bfa, dport->result.est_cable_distance);
		bfa_trc(dport->bfa, dport->result.buffer_required);
		bfa_trc(dport->bfa, dport->result.frmsz);
		bfa_trc(dport->bfa, dport->result.speed);

		for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) {
			dport->result.subtest[i].status =
				msg->info.testcomp.subtest_status[i];
			bfa_trc(dport->bfa, dport->result.subtest[i].status);
		}
		break;

	case BFI_DPORT_SCN_TESTSKIP:
	case BFI_DPORT_SCN_DDPORT_ENABLE:
		/* no test data for this run: clear the result record */
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		break;

	case BFI_DPORT_SCN_TESTSTART:
		/* new run: reset results, snapshot remote port and params */
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		dport->rp_pwwn = msg->info.teststart.pwwn;
		dport->rp_nwwn = msg->info.teststart.nwwn;
		dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm);
		bfa_dport_result_start(dport, msg->info.teststart.mode);
		break;

	case BFI_DPORT_SCN_SUBTESTSTART:
		/* mark the given subtest in-progress with a timestamp */
		subtesttype = msg->info.teststart.type;
		dport->result.subtest[subtesttype].start_time =
			bfa_get_log_time();
		dport->result.subtest[subtesttype].status =
			DPORT_TEST_ST_INPRG;

		bfa_trc(dport->bfa, subtesttype);
		bfa_trc(dport->bfa,
			dport->result.subtest[subtesttype].start_time);
		break;

	case BFI_DPORT_SCN_SFP_REMOVED:
	case BFI_DPORT_SCN_DDPORT_DISABLED:
	case BFI_DPORT_SCN_DDPORT_DISABLE:
	case BFI_DPORT_SCN_FCPORT_DISABLE:
		dport->result.status = DPORT_TEST_ST_IDLE;
		break;

	default:
		bfa_sm_fault(dport->bfa, msg->state);
	}

	/* always forward the SCN to the state machine */
	bfa_sm_send_event(dport, BFA_DPORT_SM_SCN);
}
6790
6791/*
6792 * Dport enable
6793 *
6794 * @param[in] *bfa            - bfa data struct
6795 */
6796bfa_status_t
6797bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
6798                                bfa_cb_diag_t cbfn, void *cbarg)
6799{
6800        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6801        struct bfa_dport_s  *dport = &fcdiag->dport;
6802
6803        /*
6804         * Dport is not support in MEZZ card
6805         */
6806        if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
6807                bfa_trc(dport->bfa, BFA_STATUS_PBC);
6808                return BFA_STATUS_CMD_NOTSUPP_MEZZ;
6809        }
6810
6811        /*
6812         * Dport is supported in CT2 or above
6813         */
6814        if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) {
6815                bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id);
6816                return BFA_STATUS_FEATURE_NOT_SUPPORTED;
6817        }
6818
6819        /*
6820         * Check to see if IOC is down
6821        */
6822        if (!bfa_iocfc_is_operational(bfa))
6823                return BFA_STATUS_IOC_NON_OP;
6824
6825        /* if port is PBC disabled, return error */
6826        if (bfa_fcport_is_pbcdisabled(bfa)) {
6827                bfa_trc(dport->bfa, BFA_STATUS_PBC);
6828                return BFA_STATUS_PBC;
6829        }
6830
6831        /*
6832         * Check if port mode is FC port
6833         */
6834        if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
6835                bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
6836                return BFA_STATUS_CMD_NOTSUPP_CNA;
6837        }
6838
6839        /*
6840         * Check if port is in LOOP mode
6841         */
6842        if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
6843            (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
6844                bfa_trc(dport->bfa, 0);
6845                return BFA_STATUS_TOPOLOGY_LOOP;
6846        }
6847
6848        /*
6849         * Check if port is TRUNK mode
6850         */
6851        if (bfa_fcport_is_trunk_enabled(bfa)) {
6852                bfa_trc(dport->bfa, 0);
6853                return BFA_STATUS_ERROR_TRUNK_ENABLED;
6854        }
6855
6856        /*
6857         * Check if diag loopback is running
6858         */
6859        if (bfa_fcdiag_lb_is_running(bfa)) {
6860                bfa_trc(dport->bfa, 0);
6861                return BFA_STATUS_DIAG_BUSY;
6862        }
6863
6864        /*
6865         * Check to see if port is disable or in dport state
6866         */
6867        if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6868            (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6869                bfa_trc(dport->bfa, 0);
6870                return BFA_STATUS_PORT_NOT_DISABLED;
6871        }
6872
6873        /*
6874         * Check if dport is in dynamic mode
6875         */
6876        if (dport->dynamic)
6877                return BFA_STATUS_DDPORT_ERR;
6878
6879        /*
6880         * Check if dport is busy
6881         */
6882        if (bfa_dport_is_sending_req(dport))
6883                return BFA_STATUS_DEVBUSY;
6884
6885        /*
6886         * Check if dport is already enabled
6887         */
6888        if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6889                bfa_trc(dport->bfa, 0);
6890                return BFA_STATUS_DPORT_ENABLED;
6891        }
6892
6893        bfa_trc(dport->bfa, lpcnt);
6894        bfa_trc(dport->bfa, pat);
6895        dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
6896        dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
6897        dport->cbfn = cbfn;
6898        dport->cbarg = cbarg;
6899
6900        bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
6901        return BFA_STATUS_OK;
6902}
6903
6904/*
6905 *      Dport disable
6906 *
6907 *      @param[in] *bfa            - bfa data struct
6908 */
6909bfa_status_t
6910bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6911{
6912        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6913        struct bfa_dport_s *dport = &fcdiag->dport;
6914
6915        if (bfa_ioc_is_disabled(&bfa->ioc))
6916                return BFA_STATUS_IOC_DISABLED;
6917
6918        /* if port is PBC disabled, return error */
6919        if (bfa_fcport_is_pbcdisabled(bfa)) {
6920                bfa_trc(dport->bfa, BFA_STATUS_PBC);
6921                return BFA_STATUS_PBC;
6922        }
6923
6924        /*
6925         * Check if dport is in dynamic mode
6926         */
6927        if (dport->dynamic) {
6928                return BFA_STATUS_DDPORT_ERR;
6929        }
6930
6931        /*
6932         * Check to see if port is disable or in dport state
6933         */
6934        if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6935            (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6936                bfa_trc(dport->bfa, 0);
6937                return BFA_STATUS_PORT_NOT_DISABLED;
6938        }
6939
6940        /*
6941         * Check if dport is busy
6942         */
6943        if (bfa_dport_is_sending_req(dport))
6944                return BFA_STATUS_DEVBUSY;
6945
6946        /*
6947         * Check if dport is already disabled
6948         */
6949        if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
6950                bfa_trc(dport->bfa, 0);
6951                return BFA_STATUS_DPORT_DISABLED;
6952        }
6953
6954        dport->cbfn = cbfn;
6955        dport->cbarg = cbarg;
6956
6957        bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
6958        return BFA_STATUS_OK;
6959}
6960
6961/*
6962 * Dport start -- restart dport test
6963 *
6964 *   @param[in] *bfa            - bfa data struct
6965 */
6966bfa_status_t
6967bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
6968                        bfa_cb_diag_t cbfn, void *cbarg)
6969{
6970        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6971        struct bfa_dport_s *dport = &fcdiag->dport;
6972
6973        /*
6974         * Check to see if IOC is down
6975         */
6976        if (!bfa_iocfc_is_operational(bfa))
6977                return BFA_STATUS_IOC_NON_OP;
6978
6979        /*
6980         * Check if dport is in dynamic mode
6981         */
6982        if (dport->dynamic)
6983                return BFA_STATUS_DDPORT_ERR;
6984
6985        /*
6986         * Check if dport is busy
6987         */
6988        if (bfa_dport_is_sending_req(dport))
6989                return BFA_STATUS_DEVBUSY;
6990
6991        /*
6992         * Check if dport is in enabled state.
6993         * Test can only be restart when previous test has completed
6994         */
6995        if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6996                bfa_trc(dport->bfa, 0);
6997                return BFA_STATUS_DPORT_DISABLED;
6998
6999        } else {
7000                if (dport->test_state == BFA_DPORT_ST_NO_SFP)
7001                        return BFA_STATUS_DPORT_INV_SFP;
7002
7003                if (dport->test_state == BFA_DPORT_ST_INP)
7004                        return BFA_STATUS_DEVBUSY;
7005
7006                WARN_ON(dport->test_state != BFA_DPORT_ST_COMP);
7007        }
7008
7009        bfa_trc(dport->bfa, lpcnt);
7010        bfa_trc(dport->bfa, pat);
7011
7012        dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
7013        dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
7014
7015        dport->cbfn = cbfn;
7016        dport->cbarg = cbarg;
7017
7018        bfa_sm_send_event(dport, BFA_DPORT_SM_START);
7019        return BFA_STATUS_OK;
7020}
7021
7022/*
7023 * Dport show -- return dport test result
7024 *
7025 *   @param[in] *bfa            - bfa data struct
7026 */
7027bfa_status_t
7028bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result)
7029{
7030        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
7031        struct bfa_dport_s *dport = &fcdiag->dport;
7032
7033        /*
7034         * Check to see if IOC is down
7035         */
7036        if (!bfa_iocfc_is_operational(bfa))
7037                return BFA_STATUS_IOC_NON_OP;
7038
7039        /*
7040         * Check if dport is busy
7041         */
7042        if (bfa_dport_is_sending_req(dport))
7043                return BFA_STATUS_DEVBUSY;
7044
7045        /*
7046         * Check if dport is in enabled state.
7047         */
7048        if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
7049                bfa_trc(dport->bfa, 0);
7050                return BFA_STATUS_DPORT_DISABLED;
7051
7052        }
7053
7054        /*
7055         * Check if there is SFP
7056         */
7057        if (dport->test_state == BFA_DPORT_ST_NO_SFP)
7058                return BFA_STATUS_DPORT_INV_SFP;
7059
7060        memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s));
7061
7062        return BFA_STATUS_OK;
7063}
7064