/* linux/drivers/scsi/bfa/bfa_svc.c */
   1/*
   2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
   3 * All rights reserved
   4 * www.brocade.com
   5 *
   6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
   7 *
   8 * This program is free software; you can redistribute it and/or modify it
   9 * under the terms of the GNU General Public License (GPL) Version 2 as
  10 * published by the Free Software Foundation
  11 *
  12 * This program is distributed in the hope that it will be useful, but
  13 * WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * General Public License for more details.
  16 */
  17
  18#include "bfad_drv.h"
  19#include "bfad_im.h"
  20#include "bfa_plog.h"
  21#include "bfa_cs.h"
  22#include "bfa_modules.h"
  23
  24BFA_TRC_FILE(HAL, FCXP);
  25BFA_MODULE(fcdiag);
  26BFA_MODULE(fcxp);
  27BFA_MODULE(sgpg);
  28BFA_MODULE(lps);
  29BFA_MODULE(fcport);
  30BFA_MODULE(rport);
  31BFA_MODULE(uf);
  32
  33/*
  34 * LPS related definitions
  35 */
  36#define BFA_LPS_MIN_LPORTS      (1)
  37#define BFA_LPS_MAX_LPORTS      (256)
  38
  39/*
  40 * Maximum Vports supported per physical port or vf.
  41 */
  42#define BFA_LPS_MAX_VPORTS_SUPP_CB  255
  43#define BFA_LPS_MAX_VPORTS_SUPP_CT  190
  44
  45
  46/*
  47 * FC PORT related definitions
  48 */
  49/*
  50 * The port is considered disabled if corresponding physical port or IOC are
  51 * disabled explicitly
  52 */
  53#define BFA_PORT_IS_DISABLED(bfa) \
  54        ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
  55        (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
  56
  57/*
  58 * BFA port state machine events
  59 */
enum bfa_fcport_sm_event {
        BFA_FCPORT_SM_START     = 1,    /*  start port state machine    */
        BFA_FCPORT_SM_STOP      = 2,    /*  stop port state machine     */
        BFA_FCPORT_SM_ENABLE    = 3,    /*  enable port         */
        BFA_FCPORT_SM_DISABLE   = 4,    /*  disable port state machine */
        BFA_FCPORT_SM_FWRSP     = 5,    /*  firmware enable/disable rsp */
        BFA_FCPORT_SM_LINKUP    = 6,    /*  firmware linkup event       */
        BFA_FCPORT_SM_LINKDOWN  = 7,    /*  firmware linkdown event     */
        BFA_FCPORT_SM_QRESUME   = 8,    /*  CQ space available  */
        BFA_FCPORT_SM_HWFAIL    = 9,    /*  IOC h/w failure             */
        BFA_FCPORT_SM_DPORTENABLE = 10, /*  enable dport      */
        BFA_FCPORT_SM_DPORTDISABLE = 11,/*  disable dport     */
        BFA_FCPORT_SM_FAA_MISCONFIG = 12,       /* FAA misconfiguration */
};
  74
  75/*
  76 * BFA port link notification state machine events
  77 */
  78
enum bfa_fcport_ln_sm_event {
        BFA_FCPORT_LN_SM_LINKUP         = 1,    /*  linkup event        */
        BFA_FCPORT_LN_SM_LINKDOWN       = 2,    /*  linkdown event      */
        BFA_FCPORT_LN_SM_NOTIFICATION   = 3     /*  notification done   */
};
  84
  85/*
  86 * RPORT related definitions
  87 */
/*
 * Deliver the rport-offline callback: immediately when bfa->fcs is set,
 * otherwise deferred through the HCB completion queue.
 */
#define bfa_rport_offline_cb(__rp) do {                                 \
        if ((__rp)->bfa->fcs)                                           \
                bfa_cb_rport_offline((__rp)->rport_drv);      \
        else {                                                          \
                bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,              \
                                __bfa_cb_rport_offline, (__rp));      \
        }                                                               \
} while (0)
  96
/*
 * Deliver the rport-online callback: immediately when bfa->fcs is set,
 * otherwise deferred through the HCB completion queue.
 */
#define bfa_rport_online_cb(__rp) do {                                  \
        if ((__rp)->bfa->fcs)                                           \
                bfa_cb_rport_online((__rp)->rport_drv);      \
        else {                                                          \
                bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,              \
                                  __bfa_cb_rport_online, (__rp));      \
                }                                                       \
} while (0)
 105
 106/*
 107 * forward declarations FCXP related functions
 108 */
 109static void     __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
 110static void     hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
 111                                struct bfi_fcxp_send_rsp_s *fcxp_rsp);
 112static void     hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
 113                                struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
 114static void     bfa_fcxp_qresume(void *cbarg);
 115static void     bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
 116                                struct bfi_fcxp_send_req_s *send_req);
 117
 118/*
 119 * forward declarations for LPS functions
 120 */
 121static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
 122                struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
 123static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
 124                                struct bfa_iocfc_cfg_s *cfg,
 125                                struct bfa_pcidev_s *pcidev);
 126static void bfa_lps_detach(struct bfa_s *bfa);
 127static void bfa_lps_start(struct bfa_s *bfa);
 128static void bfa_lps_stop(struct bfa_s *bfa);
 129static void bfa_lps_iocdisable(struct bfa_s *bfa);
 130static void bfa_lps_login_rsp(struct bfa_s *bfa,
 131                                struct bfi_lps_login_rsp_s *rsp);
 132static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
 133static void bfa_lps_logout_rsp(struct bfa_s *bfa,
 134                                struct bfi_lps_logout_rsp_s *rsp);
 135static void bfa_lps_reqq_resume(void *lps_arg);
 136static void bfa_lps_free(struct bfa_lps_s *lps);
 137static void bfa_lps_send_login(struct bfa_lps_s *lps);
 138static void bfa_lps_send_logout(struct bfa_lps_s *lps);
 139static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
 140static void bfa_lps_login_comp(struct bfa_lps_s *lps);
 141static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
 142static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
 143
 144/*
 145 * forward declaration for LPS state machine
 146 */
 147static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
 148static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
 149static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
 150                                        event);
 151static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
 152static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
 153                                        enum bfa_lps_event event);
 154static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
 155static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
 156                                        event);
 157
 158/*
 159 * forward declaration for FC Port functions
 160 */
 161static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
 162static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
 163static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
 164static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
 165static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
 166static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
 167static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
 168                        enum bfa_port_linkstate event, bfa_boolean_t trunk);
 169static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
 170                                enum bfa_port_linkstate event);
 171static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
 172static void bfa_fcport_stats_get_timeout(void *cbarg);
 173static void bfa_fcport_stats_clr_timeout(void *cbarg);
 174static void bfa_trunk_iocdisable(struct bfa_s *bfa);
 175
 176/*
 177 * forward declaration for FC PORT state machine
 178 */
 179static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
 180                                        enum bfa_fcport_sm_event event);
 181static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
 182                                        enum bfa_fcport_sm_event event);
 183static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
 184                                        enum bfa_fcport_sm_event event);
 185static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
 186                                        enum bfa_fcport_sm_event event);
 187static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
 188                                        enum bfa_fcport_sm_event event);
 189static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
 190                                        enum bfa_fcport_sm_event event);
 191static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
 192                                        enum bfa_fcport_sm_event event);
 193static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
 194                                        enum bfa_fcport_sm_event event);
 195static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
 196                                        enum bfa_fcport_sm_event event);
 197static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
 198                                        enum bfa_fcport_sm_event event);
 199static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
 200                                        enum bfa_fcport_sm_event event);
 201static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
 202                                        enum bfa_fcport_sm_event event);
 203static void     bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
 204                                        enum bfa_fcport_sm_event event);
 205static void     bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
 206                                        enum bfa_fcport_sm_event event);
 207
 208static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
 209                                        enum bfa_fcport_ln_sm_event event);
 210static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
 211                                        enum bfa_fcport_ln_sm_event event);
 212static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
 213                                        enum bfa_fcport_ln_sm_event event);
 214static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
 215                                        enum bfa_fcport_ln_sm_event event);
 216static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
 217                                        enum bfa_fcport_ln_sm_event event);
 218static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
 219                                        enum bfa_fcport_ln_sm_event event);
 220static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
 221                                        enum bfa_fcport_ln_sm_event event);
 222
/*
 * Mapping of fcport state-machine handler functions to the externally
 * visible BFA_PORT_ST_* states they represent.
 */
static struct bfa_sm_table_s hal_port_sm_table[] = {
        {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
        {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
        {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
        {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
        {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
        {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
        {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
        {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
        {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
        {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
        {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
        /* iocfail is reported as IOCDOWN as well */
        {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
        {BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
        {BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
};
 239
 240
 241/*
 242 * forward declaration for RPORT related functions
 243 */
 244static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
 245static void             bfa_rport_free(struct bfa_rport_s *rport);
 246static bfa_boolean_t    bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
 247static bfa_boolean_t    bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
 248static bfa_boolean_t    bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
 249static void             __bfa_cb_rport_online(void *cbarg,
 250                                                bfa_boolean_t complete);
 251static void             __bfa_cb_rport_offline(void *cbarg,
 252                                                bfa_boolean_t complete);
 253
 254/*
 255 * forward declaration for RPORT state machine
 256 */
 257static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
 258                                        enum bfa_rport_event event);
 259static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
 260                                        enum bfa_rport_event event);
 261static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
 262                                        enum bfa_rport_event event);
 263static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
 264                                        enum bfa_rport_event event);
 265static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
 266                                        enum bfa_rport_event event);
 267static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
 268                                        enum bfa_rport_event event);
 269static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
 270                                        enum bfa_rport_event event);
 271static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
 272                                        enum bfa_rport_event event);
 273static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
 274                                        enum bfa_rport_event event);
 275static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
 276                                        enum bfa_rport_event event);
 277static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
 278                                        enum bfa_rport_event event);
 279static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
 280                                        enum bfa_rport_event event);
 281static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
 282                                        enum bfa_rport_event event);
 283
 284/*
 285 * PLOG related definitions
 286 */
 287static int
 288plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
 289{
 290        if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
 291                (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
 292                return 1;
 293
 294        if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
 295                (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
 296                return 1;
 297
 298        return 0;
 299}
 300
 301static u64
 302bfa_get_log_time(void)
 303{
 304        u64 system_time = 0;
 305        struct timeval tv;
 306        do_gettimeofday(&tv);
 307
 308        /* We are interested in seconds only. */
 309        system_time = tv.tv_sec;
 310        return system_time;
 311}
 312
/*
 * Append one record to the circular portal log, timestamping the stored
 * copy.  When the ring wraps, the oldest record is discarded.
 */
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
        u16 tail;
        struct bfa_plog_rec_s *pl_recp;

        /* Silently drop records while logging is disabled. */
        if (plog->plog_enabled == 0)
                return;

        /* Reject malformed records (bad type / too many ints). */
        if (plkd_validate_logrec(pl_rec)) {
                WARN_ON(1);
                return;
        }

        tail = plog->tail;

        pl_recp = &(plog->plog_recs[tail]);

        memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

        /* Timestamp the stored copy, then advance the tail. */
        pl_recp->tv = bfa_get_log_time();
        BFA_PL_LOG_REC_INCR(plog->tail);

        /* Ring full: advance head, overwriting the oldest record. */
        if (plog->head == plog->tail)
                BFA_PL_LOG_REC_INCR(plog->head);
}
 339
 340void
 341bfa_plog_init(struct bfa_plog_s *plog)
 342{
 343        memset((char *)plog, 0, sizeof(struct bfa_plog_s));
 344
 345        memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
 346        plog->head = plog->tail = 0;
 347        plog->plog_enabled = 1;
 348}
 349
 350void
 351bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 352                enum bfa_plog_eid event,
 353                u16 misc, char *log_str)
 354{
 355        struct bfa_plog_rec_s  lp;
 356
 357        if (plog->plog_enabled) {
 358                memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 359                lp.mid = mid;
 360                lp.eid = event;
 361                lp.log_type = BFA_PL_LOG_TYPE_STRING;
 362                lp.misc = misc;
 363                strncpy(lp.log_entry.string_log, log_str,
 364                        BFA_PL_STRING_LOG_SZ - 1);
 365                lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
 366                bfa_plog_add(plog, &lp);
 367        }
 368}
 369
 370void
 371bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 372                enum bfa_plog_eid event,
 373                u16 misc, u32 *intarr, u32 num_ints)
 374{
 375        struct bfa_plog_rec_s  lp;
 376        u32 i;
 377
 378        if (num_ints > BFA_PL_INT_LOG_SZ)
 379                num_ints = BFA_PL_INT_LOG_SZ;
 380
 381        if (plog->plog_enabled) {
 382                memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 383                lp.mid = mid;
 384                lp.eid = event;
 385                lp.log_type = BFA_PL_LOG_TYPE_INT;
 386                lp.misc = misc;
 387
 388                for (i = 0; i < num_ints; i++)
 389                        lp.log_entry.int_log[i] = intarr[i];
 390
 391                lp.log_num_ints = (u8) num_ints;
 392
 393                bfa_plog_add(plog, &lp);
 394        }
 395}
 396
 397void
 398bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 399                        enum bfa_plog_eid event,
 400                        u16 misc, struct fchs_s *fchdr)
 401{
 402        struct bfa_plog_rec_s  lp;
 403        u32     *tmp_int = (u32 *) fchdr;
 404        u32     ints[BFA_PL_INT_LOG_SZ];
 405
 406        if (plog->plog_enabled) {
 407                memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 408
 409                ints[0] = tmp_int[0];
 410                ints[1] = tmp_int[1];
 411                ints[2] = tmp_int[4];
 412
 413                bfa_plog_intarr(plog, mid, event, misc, ints, 3);
 414        }
 415}
 416
 417void
 418bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 419                      enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
 420                      u32 pld_w0)
 421{
 422        struct bfa_plog_rec_s  lp;
 423        u32     *tmp_int = (u32 *) fchdr;
 424        u32     ints[BFA_PL_INT_LOG_SZ];
 425
 426        if (plog->plog_enabled) {
 427                memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 428
 429                ints[0] = tmp_int[0];
 430                ints[1] = tmp_int[1];
 431                ints[2] = tmp_int[4];
 432                ints[3] = pld_w0;
 433
 434                bfa_plog_intarr(plog, mid, event, misc, ints, 4);
 435        }
 436}
 437
 438
 439/*
 440 *  fcxp_pvt BFA FCXP private functions
 441 */
 442
 443static void
 444claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
 445{
 446        u16     i;
 447        struct bfa_fcxp_s *fcxp;
 448
 449        fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
 450        memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
 451
 452        INIT_LIST_HEAD(&mod->fcxp_req_free_q);
 453        INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
 454        INIT_LIST_HEAD(&mod->fcxp_active_q);
 455        INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
 456        INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
 457
 458        mod->fcxp_list = fcxp;
 459
 460        for (i = 0; i < mod->num_fcxps; i++) {
 461                fcxp->fcxp_mod = mod;
 462                fcxp->fcxp_tag = i;
 463
 464                if (i < (mod->num_fcxps / 2)) {
 465                        list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
 466                        fcxp->req_rsp = BFA_TRUE;
 467                } else {
 468                        list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
 469                        fcxp->req_rsp = BFA_FALSE;
 470                }
 471
 472                bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
 473                fcxp->reqq_waiting = BFA_FALSE;
 474
 475                fcxp = fcxp + 1;
 476        }
 477
 478        bfa_mem_kva_curp(mod) = (void *)fcxp;
 479}
 480
/*
 * Report the DMA and KVA memory this module needs for the configured
 * number of FCXP requests.  DMA payload space is split across segments;
 * KVA holds the bfa_fcxp_s array itself.
 */
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
                struct bfa_s *bfa)
{
        struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
        struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
        struct bfa_mem_dma_s *seg_ptr;
        u16     nsegs, idx, per_seg_fcxp;
        u16     num_fcxps = cfg->fwcfg.num_fcxp_reqs;
        u32     per_fcxp_sz;

        if (num_fcxps == 0)
                return;

        /* Min config: two inline buffers; otherwise inline + large. */
        if (cfg->drvcfg.min_cfg)
                per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
        else
                per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;

        /* dma memory */
        nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
        per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);

        /* Fill each DMA segment; the last one takes the remainder. */
        bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
                if (num_fcxps >= per_seg_fcxp) {
                        num_fcxps -= per_seg_fcxp;
                        bfa_mem_dma_setup(minfo, seg_ptr,
                                per_seg_fcxp * per_fcxp_sz);
                } else
                        bfa_mem_dma_setup(minfo, seg_ptr,
                                num_fcxps * per_fcxp_sz);
        }

        /* kva memory */
        bfa_mem_kva_setup(minfo, fcxp_kva,
                cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}
 518
 519static void
 520bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 521                struct bfa_pcidev_s *pcidev)
 522{
 523        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
 524
 525        mod->bfa = bfa;
 526        mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
 527
 528        /*
 529         * Initialize FCXP request and response payload sizes.
 530         */
 531        mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
 532        if (!cfg->drvcfg.min_cfg)
 533                mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
 534
 535        INIT_LIST_HEAD(&mod->req_wait_q);
 536        INIT_LIST_HEAD(&mod->rsp_wait_q);
 537
 538        claim_fcxps_mem(mod);
 539}
 540
/* Intentionally empty: FCXP needs no detach-time teardown. */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
 545
/* Intentionally empty: FCXP needs no start-time work. */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
 550
/* Intentionally empty: FCXP needs no stop-time work. */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
 555
/*
 * IOC disable handler: recycle unused fcxps back to the free lists and
 * fail every active fcxp with BFA_STATUS_IOC_FAILURE -- synchronously
 * when there is no caller context, otherwise via the HCB queue.
 */
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
        struct bfa_fcxp_s *fcxp;
        struct list_head              *qe, *qen;

        /* Enqueue unused fcxp resources to free_q */
        list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
        list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);

        list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
                fcxp = (struct bfa_fcxp_s *) qe;
                if (fcxp->caller == NULL) {
                        /* No caller: complete and free immediately. */
                        fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
                                        BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
                        bfa_fcxp_free(fcxp);
                } else {
                        /* Defer completion to the callback queue. */
                        fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
                        bfa_cb_queue(bfa, &fcxp->hcb_qe,
                                     __bfa_fcxp_send_cbfn, fcxp);
                }
        }
}
 580
 581static struct bfa_fcxp_s *
 582bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
 583{
 584        struct bfa_fcxp_s *fcxp;
 585
 586        if (req)
 587                bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
 588        else
 589                bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
 590
 591        if (fcxp)
 592                list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
 593
 594        return fcxp;
 595}
 596
 597static void
 598bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
 599               struct bfa_s *bfa,
 600               u8 *use_ibuf,
 601               u32 *nr_sgles,
 602               bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
 603               bfa_fcxp_get_sglen_t *r_sglen_cbfn,
 604               struct list_head *r_sgpg_q,
 605               int n_sgles,
 606               bfa_fcxp_get_sgaddr_t sga_cbfn,
 607               bfa_fcxp_get_sglen_t sglen_cbfn)
 608{
 609
 610        WARN_ON(bfa == NULL);
 611
 612        bfa_trc(bfa, fcxp->fcxp_tag);
 613
 614        if (n_sgles == 0) {
 615                *use_ibuf = 1;
 616        } else {
 617                WARN_ON(*sga_cbfn == NULL);
 618                WARN_ON(*sglen_cbfn == NULL);
 619
 620                *use_ibuf = 0;
 621                *r_sga_cbfn = sga_cbfn;
 622                *r_sglen_cbfn = sglen_cbfn;
 623
 624                *nr_sgles = n_sgles;
 625
 626                /*
 627                 * alloc required sgpgs
 628                 */
 629                if (n_sgles > BFI_SGE_INLINE)
 630                        WARN_ON(1);
 631        }
 632
 633}
 634
/*
 * Initialize a freshly allocated fcxp: record the caller context and
 * set up the request- and response-direction SG state via
 * bfa_fcxp_init_reqrsp().
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
               void *caller, struct bfa_s *bfa, int nreq_sgles,
               int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
               bfa_fcxp_get_sglen_t req_sglen_cbfn,
               bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
               bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

        WARN_ON(bfa == NULL);

        bfa_trc(bfa, fcxp->fcxp_tag);

        fcxp->caller = caller;

        /* Request direction. */
        bfa_fcxp_init_reqrsp(fcxp, bfa,
                &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
                &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
                nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

        /* Response direction. */
        bfa_fcxp_init_reqrsp(fcxp, bfa,
                &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
                &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
                nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}
 661
/*
 * Release an fcxp.  If a caller is waiting for one of this type, hand
 * the fcxp over directly (re-initialized with the waiter's parameters);
 * otherwise return it to the matching free list.
 */
static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
        struct bfa_fcxp_wqe_s *wqe;

        /* Pop a waiter, if any, from the matching wait queue. */
        if (fcxp->req_rsp)
                bfa_q_deq(&mod->req_wait_q, &wqe);
        else
                bfa_q_deq(&mod->rsp_wait_q, &wqe);

        if (wqe) {
                bfa_trc(mod->bfa, fcxp->fcxp_tag);

                /* Re-initialize for the waiter and deliver it. */
                bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
                        wqe->nrsp_sgles, wqe->req_sga_cbfn,
                        wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
                        wqe->rsp_sglen_cbfn);

                wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
                return;
        }

        /* No waiter: move from the active list back to the free list. */
        WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
        list_del(&fcxp->qe);

        if (fcxp->req_rsp)
                list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
        else
                list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
}
 693
/*
 * No-op send-completion callback installed for fcxps whose completion
 * has been discarded.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
                   bfa_status_t req_status, u32 rsp_len,
                   u32 resid_len, struct fchs_s *rsp_fchs)
{
        /* discarded fcxp completion */
}
 701
 702static void
 703__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
 704{
 705        struct bfa_fcxp_s *fcxp = cbarg;
 706
 707        if (complete) {
 708                fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
 709                                fcxp->rsp_status, fcxp->rsp_len,
 710                                fcxp->residue_len, &fcxp->rsp_fchs);
 711        } else {
 712                bfa_fcxp_free(fcxp);
 713        }
 714}
 715
/*
 * Firmware completion handler for an FCXP send: fix up endianness and
 * residue, log the received frame, then deliver the completion --
 * synchronously when there is no caller context, otherwise deferred
 * via the HCB queue.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
        struct bfa_fcxp_mod_s   *mod = BFA_FCXP_MOD(bfa);
        struct bfa_fcxp_s       *fcxp;
        u16             fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

        bfa_trc(bfa, fcxp_tag);

        fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

        /*
         * @todo f/w should not set residue to non-0 when everything
         *       is received.
         */
        if (fcxp_rsp->req_status == BFA_STATUS_OK)
                fcxp_rsp->residue_len = 0;
        else
                fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

        fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

        /* A completed fcxp is expected to have a callback installed. */
        WARN_ON(fcxp->send_cbfn == NULL);

        hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

        if (fcxp->send_cbfn != NULL) {
                bfa_trc(mod->bfa, (NULL == fcxp->caller));
                if (fcxp->caller == NULL) {
                        fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
                                        fcxp_rsp->req_status, fcxp_rsp->rsp_len,
                                        fcxp_rsp->residue_len, &fcxp_rsp->fchs);
                        /*
                         * fcxp automatically freed on return from the callback
                         */
                        bfa_fcxp_free(fcxp);
                } else {
                        /* Save the response; completion runs from hcb queue. */
                        fcxp->rsp_status = fcxp_rsp->req_status;
                        fcxp->rsp_len = fcxp_rsp->rsp_len;
                        fcxp->residue_len = fcxp_rsp->residue_len;
                        fcxp->rsp_fchs = fcxp_rsp->fchs;

                        bfa_cb_queue(bfa, &fcxp->hcb_qe,
                                        __bfa_fcxp_send_cbfn, fcxp);
                }
        } else {
                bfa_trc(bfa, (NULL == fcxp->send_cbfn));
        }
}
 765
 766static void
 767hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
 768                 struct fchs_s *fchs)
 769{
 770        /*
 771         * TODO: TX ox_id
 772         */
 773        if (reqlen > 0) {
 774                if (fcxp->use_ireqbuf) {
 775                        u32     pld_w0 =
 776                                *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
 777
 778                        bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
 779                                        BFA_PL_EID_TX,
 780                                        reqlen + sizeof(struct fchs_s), fchs,
 781                                        pld_w0);
 782                } else {
 783                        bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
 784                                        BFA_PL_EID_TX,
 785                                        reqlen + sizeof(struct fchs_s),
 786                                        fchs);
 787                }
 788        } else {
 789                bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
 790                               reqlen + sizeof(struct fchs_s), fchs);
 791        }
 792}
 793
 794static void
 795hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
 796                 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
 797{
 798        if (fcxp_rsp->rsp_len > 0) {
 799                if (fcxp->use_irspbuf) {
 800                        u32     pld_w0 =
 801                                *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
 802
 803                        bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
 804                                              BFA_PL_EID_RX,
 805                                              (u16) fcxp_rsp->rsp_len,
 806                                              &fcxp_rsp->fchs, pld_w0);
 807                } else {
 808                        bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
 809                                       BFA_PL_EID_RX,
 810                                       (u16) fcxp_rsp->rsp_len,
 811                                       &fcxp_rsp->fchs);
 812                }
 813        } else {
 814                bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
 815                               (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
 816        }
 817}
 818
 819/*
 820 * Handler to resume sending fcxp when space in available in cpe queue.
 821 */
 822static void
 823bfa_fcxp_qresume(void *cbarg)
 824{
 825        struct bfa_fcxp_s               *fcxp = cbarg;
 826        struct bfa_s                    *bfa = fcxp->fcxp_mod->bfa;
 827        struct bfi_fcxp_send_req_s      *send_req;
 828
 829        fcxp->reqq_waiting = BFA_FALSE;
 830        send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
 831        bfa_fcxp_queue(fcxp, send_req);
 832}
 833
 834/*
 835 * Queue fcxp send request to foimrware.
 836 */
 837static void
 838bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
 839{
 840        struct bfa_s                    *bfa = fcxp->fcxp_mod->bfa;
 841        struct bfa_fcxp_req_info_s      *reqi = &fcxp->req_info;
 842        struct bfa_fcxp_rsp_info_s      *rspi = &fcxp->rsp_info;
 843        struct bfa_rport_s              *rport = reqi->bfa_rport;
 844
 845        bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
 846                    bfa_fn_lpu(bfa));
 847
 848        send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
 849        if (rport) {
 850                send_req->rport_fw_hndl = rport->fw_handle;
 851                send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
 852                if (send_req->max_frmsz == 0)
 853                        send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
 854        } else {
 855                send_req->rport_fw_hndl = 0;
 856                send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
 857        }
 858
 859        send_req->vf_id = cpu_to_be16(reqi->vf_id);
 860        send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
 861        send_req->class = reqi->class;
 862        send_req->rsp_timeout = rspi->rsp_timeout;
 863        send_req->cts = reqi->cts;
 864        send_req->fchs = reqi->fchs;
 865
 866        send_req->req_len = cpu_to_be32(reqi->req_tot_len);
 867        send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
 868
 869        /*
 870         * setup req sgles
 871         */
 872        if (fcxp->use_ireqbuf == 1) {
 873                bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
 874                                        BFA_FCXP_REQ_PLD_PA(fcxp));
 875        } else {
 876                if (fcxp->nreq_sgles > 0) {
 877                        WARN_ON(fcxp->nreq_sgles != 1);
 878                        bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
 879                                fcxp->req_sga_cbfn(fcxp->caller, 0));
 880                } else {
 881                        WARN_ON(reqi->req_tot_len != 0);
 882                        bfa_alen_set(&send_req->rsp_alen, 0, 0);
 883                }
 884        }
 885
 886        /*
 887         * setup rsp sgles
 888         */
 889        if (fcxp->use_irspbuf == 1) {
 890                WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
 891
 892                bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
 893                                        BFA_FCXP_RSP_PLD_PA(fcxp));
 894        } else {
 895                if (fcxp->nrsp_sgles > 0) {
 896                        WARN_ON(fcxp->nrsp_sgles != 1);
 897                        bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
 898                                fcxp->rsp_sga_cbfn(fcxp->caller, 0));
 899
 900                } else {
 901                        WARN_ON(rspi->rsp_maxlen != 0);
 902                        bfa_alen_set(&send_req->rsp_alen, 0, 0);
 903                }
 904        }
 905
 906        hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
 907
 908        bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
 909
 910        bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
 911        bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
 912}
 913
 914/*
 915 * Allocate an FCXP instance to send a response or to send a request
 916 * that has a response. Request/response buffers are allocated by caller.
 917 *
 918 * @param[in]   bfa             BFA bfa instance
 919 * @param[in]   nreq_sgles      Number of SG elements required for request
 920 *                              buffer. 0, if fcxp internal buffers are used.
 921 *                              Use bfa_fcxp_get_reqbuf() to get the
 922 *                              internal req buffer.
 923 * @param[in]   req_sgles       SG elements describing request buffer. Will be
 924 *                              copied in by BFA and hence can be freed on
 925 *                              return from this function.
 926 * @param[in]   get_req_sga     function ptr to be called to get a request SG
 927 *                              Address (given the sge index).
 928 * @param[in]   get_req_sglen   function ptr to be called to get a request SG
 929 *                              len (given the sge index).
 930 * @param[in]   get_rsp_sga     function ptr to be called to get a response SG
 931 *                              Address (given the sge index).
 932 * @param[in]   get_rsp_sglen   function ptr to be called to get a response SG
 933 *                              len (given the sge index).
 934 * @param[in]   req             Allocated FCXP is used to send req or rsp?
 935 *                              request - BFA_TRUE, response - BFA_FALSE
 936 *
 937 * @return FCXP instance. NULL on failure.
 938 */
 939struct bfa_fcxp_s *
 940bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
 941                int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
 942                bfa_fcxp_get_sglen_t req_sglen_cbfn,
 943                bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
 944                bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
 945{
 946        struct bfa_fcxp_s *fcxp = NULL;
 947
 948        WARN_ON(bfa == NULL);
 949
 950        fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
 951        if (fcxp == NULL)
 952                return NULL;
 953
 954        bfa_trc(bfa, fcxp->fcxp_tag);
 955
 956        bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
 957                        req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
 958
 959        return fcxp;
 960}
 961
 962/*
 963 * Get the internal request buffer pointer
 964 *
 965 * @param[in]   fcxp    BFA fcxp pointer
 966 *
 967 * @return              pointer to the internal request buffer
 968 */
 969void *
 970bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
 971{
 972        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
 973        void    *reqbuf;
 974
 975        WARN_ON(fcxp->use_ireqbuf != 1);
 976        reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
 977                                mod->req_pld_sz + mod->rsp_pld_sz);
 978        return reqbuf;
 979}
 980
 981u32
 982bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
 983{
 984        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
 985
 986        return mod->req_pld_sz;
 987}
 988
 989/*
 990 * Get the internal response buffer pointer
 991 *
 992 * @param[in]   fcxp    BFA fcxp pointer
 993 *
 994 * @return              pointer to the internal request buffer
 995 */
 996void *
 997bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
 998{
 999        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1000        void    *fcxp_buf;
1001
1002        WARN_ON(fcxp->use_irspbuf != 1);
1003
1004        fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
1005                                mod->req_pld_sz + mod->rsp_pld_sz);
1006
1007        /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
1008        return ((u8 *) fcxp_buf) + mod->req_pld_sz;
1009}
1010
1011/*
1012 * Free the BFA FCXP
1013 *
1014 * @param[in]   fcxp                    BFA fcxp pointer
1015 *
1016 * @return              void
1017 */
1018void
1019bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1020{
1021        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1022
1023        WARN_ON(fcxp == NULL);
1024        bfa_trc(mod->bfa, fcxp->fcxp_tag);
1025        bfa_fcxp_put(fcxp);
1026}
1027
1028/*
1029 * Send a FCXP request
1030 *
1031 * @param[in]   fcxp    BFA fcxp pointer
1032 * @param[in]   rport   BFA rport pointer. Could be left NULL for WKA rports
1033 * @param[in]   vf_id   virtual Fabric ID
1034 * @param[in]   lp_tag  lport tag
1035 * @param[in]   cts     use Continuous sequence
1036 * @param[in]   cos     fc Class of Service
1037 * @param[in]   reqlen  request length, does not include FCHS length
1038 * @param[in]   fchs    fc Header Pointer. The header content will be copied
1039 *                      in by BFA.
1040 *
1041 * @param[in]   cbfn    call back function to be called on receiving
1042 *                                                              the response
1043 * @param[in]   cbarg   arg for cbfn
1044 * @param[in]   rsp_timeout
1045 *                      response timeout
1046 *
1047 * @return              bfa_status_t
1048 */
1049void
1050bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1051              u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1052              u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1053              void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1054{
1055        struct bfa_s                    *bfa  = fcxp->fcxp_mod->bfa;
1056        struct bfa_fcxp_req_info_s      *reqi = &fcxp->req_info;
1057        struct bfa_fcxp_rsp_info_s      *rspi = &fcxp->rsp_info;
1058        struct bfi_fcxp_send_req_s      *send_req;
1059
1060        bfa_trc(bfa, fcxp->fcxp_tag);
1061
1062        /*
1063         * setup request/response info
1064         */
1065        reqi->bfa_rport = rport;
1066        reqi->vf_id = vf_id;
1067        reqi->lp_tag = lp_tag;
1068        reqi->class = cos;
1069        rspi->rsp_timeout = rsp_timeout;
1070        reqi->cts = cts;
1071        reqi->fchs = *fchs;
1072        reqi->req_tot_len = reqlen;
1073        rspi->rsp_maxlen = rsp_maxlen;
1074        fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1075        fcxp->send_cbarg = cbarg;
1076
1077        /*
1078         * If no room in CPE queue, wait for space in request queue
1079         */
1080        send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1081        if (!send_req) {
1082                bfa_trc(bfa, fcxp->fcxp_tag);
1083                fcxp->reqq_waiting = BFA_TRUE;
1084                bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1085                return;
1086        }
1087
1088        bfa_fcxp_queue(fcxp, send_req);
1089}
1090
1091/*
1092 * Abort a BFA FCXP
1093 *
1094 * @param[in]   fcxp    BFA fcxp pointer
1095 *
1096 * @return              void
1097 */
1098bfa_status_t
1099bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
1100{
1101        bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
1102        WARN_ON(1);
1103        return BFA_STATUS_OK;
1104}
1105
1106void
1107bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1108               bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1109               void *caller, int nreq_sgles,
1110               int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1111               bfa_fcxp_get_sglen_t req_sglen_cbfn,
1112               bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1113               bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
1114{
1115        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1116
1117        if (req)
1118                WARN_ON(!list_empty(&mod->fcxp_req_free_q));
1119        else
1120                WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
1121
1122        wqe->alloc_cbfn = alloc_cbfn;
1123        wqe->alloc_cbarg = alloc_cbarg;
1124        wqe->caller = caller;
1125        wqe->bfa = bfa;
1126        wqe->nreq_sgles = nreq_sgles;
1127        wqe->nrsp_sgles = nrsp_sgles;
1128        wqe->req_sga_cbfn = req_sga_cbfn;
1129        wqe->req_sglen_cbfn = req_sglen_cbfn;
1130        wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1131        wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1132
1133        if (req)
1134                list_add_tail(&wqe->qe, &mod->req_wait_q);
1135        else
1136                list_add_tail(&wqe->qe, &mod->rsp_wait_q);
1137}
1138
1139void
1140bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1141{
1142        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1143
1144        WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) ||
1145                !bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
1146        list_del(&wqe->qe);
1147}
1148
1149void
1150bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1151{
1152        /*
1153         * If waiting for room in request queue, cancel reqq wait
1154         * and free fcxp.
1155         */
1156        if (fcxp->reqq_waiting) {
1157                fcxp->reqq_waiting = BFA_FALSE;
1158                bfa_reqq_wcancel(&fcxp->reqq_wqe);
1159                bfa_fcxp_free(fcxp);
1160                return;
1161        }
1162
1163        fcxp->send_cbfn = bfa_fcxp_null_comp;
1164}
1165
1166void
1167bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1168{
1169        switch (msg->mhdr.msg_id) {
1170        case BFI_FCXP_I2H_SEND_RSP:
1171                hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1172                break;
1173
1174        default:
1175                bfa_trc(bfa, msg->mhdr.msg_id);
1176                WARN_ON(1);
1177        }
1178}
1179
1180u32
1181bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1182{
1183        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1184
1185        return mod->rsp_pld_sz;
1186}
1187
1188void
1189bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
1190{
1191        struct bfa_fcxp_mod_s   *mod = BFA_FCXP_MOD(bfa);
1192        struct list_head        *qe;
1193        int     i;
1194
1195        for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
1196                if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
1197                        bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
1198                        list_add_tail(qe, &mod->fcxp_req_unused_q);
1199                } else {
1200                        bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
1201                        list_add_tail(qe, &mod->fcxp_rsp_unused_q);
1202                }
1203        }
1204}
1205
1206/*
1207 *  BFA LPS state machine functions
1208 */
1209
1210/*
1211 * Init state -- no login
1212 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
        bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);

        switch (event) {
        case BFA_LPS_SM_LOGIN:
                /*
                 * Send FLOGI/FDISC now if the request queue has room,
                 * otherwise park in loginwait until space frees up.
                 */
                if (bfa_reqq_full(lps->bfa, lps->reqq)) {
                        bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
                        bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
                } else {
                        bfa_sm_set_state(lps, bfa_lps_sm_login);
                        bfa_lps_send_login(lps);
                }

                /* fdisc distinguishes vport (FDISC) from base port (FLOGI) */
                if (lps->fdisc)
                        bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                                BFA_PL_EID_LOGIN, 0, "FDISC Request");
                else
                        bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                                BFA_PL_EID_LOGIN, 0, "FLOGI Request");
                break;

        case BFA_LPS_SM_LOGOUT:
                /* not logged in: complete the logout immediately */
                bfa_lps_logout_comp(lps);
                break;

        case BFA_LPS_SM_DELETE:
                /* return the lps to the module free pool */
                bfa_lps_free(lps);
                break;

        case BFA_LPS_SM_RX_CVL:
        case BFA_LPS_SM_OFFLINE:
                /* nothing to do while not logged in */
                break;

        case BFA_LPS_SM_FWRSP:
                /*
                 * Could happen when fabric detects loopback and discards
                 * the lps request. Fw will eventually sent out the timeout
                 * Just ignore
                 */
                break;
        case BFA_LPS_SM_SET_N2N_PID:
                /*
                 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
                 * this event. Ignore this event.
                 */
                break;

        default:
                bfa_sm_fault(lps->bfa, event);
        }
}
1267
1268/*
1269 * login is in progress -- awaiting response from firmware
1270 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
        bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);

        switch (event) {
        case BFA_LPS_SM_FWRSP:
        case BFA_LPS_SM_OFFLINE:
                /* lps->status was filled in by the firmware response path */
                if (lps->status == BFA_STATUS_OK) {
                        bfa_sm_set_state(lps, bfa_lps_sm_online);
                        if (lps->fdisc)
                                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                                        BFA_PL_EID_LOGIN, 0, "FDISC Accept");
                        else
                                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                                        BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
                        /* If N2N, send the assigned PID to FW */
                        bfa_trc(lps->bfa, lps->fport);
                        bfa_trc(lps->bfa, lps->lp_pid);

                        if (!lps->fport && lps->lp_pid)
                                bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
                } else {
                        /* login failed (reject or timeout): back to init */
                        bfa_sm_set_state(lps, bfa_lps_sm_init);
                        if (lps->fdisc)
                                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                                        BFA_PL_EID_LOGIN, 0,
                                        "FDISC Fail (RJT or timeout)");
                        else
                                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                                        BFA_PL_EID_LOGIN, 0,
                                        "FLOGI Fail (RJT or timeout)");
                }
                /* notify the login originator in either case */
                bfa_lps_login_comp(lps);
                break;

        case BFA_LPS_SM_DELETE:
                /*
                 * NOTE(review): unlike DELETE in init state, the lps is
                 * not freed here -- confirm this is intentional.
                 */
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                break;

        case BFA_LPS_SM_SET_N2N_PID:
                /* login still in flight: just trace and stay put */
                bfa_trc(lps->bfa, lps->fport);
                bfa_trc(lps->bfa, lps->lp_pid);
                break;

        default:
                bfa_sm_fault(lps->bfa, event);
        }
}
1321
1322/*
1323 * login pending - awaiting space in request queue
1324 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
        bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);

        switch (event) {
        case BFA_LPS_SM_RESUME:
                /* request-queue space freed up: send the pending login */
                bfa_sm_set_state(lps, bfa_lps_sm_login);
                bfa_lps_send_login(lps);
                break;

        case BFA_LPS_SM_OFFLINE:
        case BFA_LPS_SM_DELETE:
                /* abandon the login: cancel the queued wait element */
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                bfa_reqq_wcancel(&lps->wqe);
                break;

        case BFA_LPS_SM_RX_CVL:
                /*
                 * Login was not even sent out; so when getting out
                 * of this state, it will appear like a login retry
                 * after Clear virtual link
                 */
                break;

        default:
                bfa_sm_fault(lps->bfa, event);
        }
}
1355
1356/*
1357 * login complete
1358 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
        bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);

        switch (event) {
        case BFA_LPS_SM_LOGOUT:
                /* send LOGO now, or wait for request-queue space */
                if (bfa_reqq_full(lps->bfa, lps->reqq)) {
                        bfa_sm_set_state(lps, bfa_lps_sm_logowait);
                        bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
                } else {
                        bfa_sm_set_state(lps, bfa_lps_sm_logout);
                        bfa_lps_send_logout(lps);
                }
                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                        BFA_PL_EID_LOGO, 0, "Logout");
                break;

        case BFA_LPS_SM_RX_CVL:
                bfa_sm_set_state(lps, bfa_lps_sm_init);

                /* Let the vport module know about this event */
                bfa_lps_cvl_event(lps);
                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                        BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
                break;

        case BFA_LPS_SM_SET_N2N_PID:
                /* push the locally assigned N2N PID to firmware */
                if (bfa_reqq_full(lps->bfa, lps->reqq)) {
                        bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
                        bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
                } else
                        bfa_lps_send_set_n2n_pid(lps);
                break;

        case BFA_LPS_SM_OFFLINE:
        case BFA_LPS_SM_DELETE:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                break;

        default:
                bfa_sm_fault(lps->bfa, event);
        }
}
1404
1405/*
1406 * login complete
1407 */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
        bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);

        switch (event) {
        case BFA_LPS_SM_RESUME:
                /* queue space available: send the N2N PID, back to online */
                bfa_sm_set_state(lps, bfa_lps_sm_online);
                bfa_lps_send_set_n2n_pid(lps);
                break;

        case BFA_LPS_SM_LOGOUT:
                /*
                 * The wait element is already queued; presumably the LOGO
                 * is sent when the queue resumes in logowait -- confirm.
                 */
                bfa_sm_set_state(lps, bfa_lps_sm_logowait);
                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                        BFA_PL_EID_LOGO, 0, "Logout");
                break;

        case BFA_LPS_SM_RX_CVL:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                bfa_reqq_wcancel(&lps->wqe);

                /* Let the vport module know about this event */
                bfa_lps_cvl_event(lps);
                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                        BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
                break;

        case BFA_LPS_SM_OFFLINE:
        case BFA_LPS_SM_DELETE:
                /* going away: cancel the pending reqq wait */
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                bfa_reqq_wcancel(&lps->wqe);
                break;

        default:
                bfa_sm_fault(lps->bfa, event);
        }
}
1446
1447/*
1448 * logout in progress - awaiting firmware response
1449 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
        bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);

        switch (event) {
        case BFA_LPS_SM_FWRSP:
        case BFA_LPS_SM_OFFLINE:
                /* logout finished (or port went offline): notify owner */
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                bfa_lps_logout_comp(lps);
                break;

        case BFA_LPS_SM_DELETE:
                /* drop back to init without the completion callback */
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                break;

        default:
                bfa_sm_fault(lps->bfa, event);
        }
}
1471
1472/*
1473 * logout pending -- awaiting space in request queue
1474 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
        bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);

        switch (event) {
        case BFA_LPS_SM_RESUME:
                /* request-queue space freed up: send the pending LOGO */
                bfa_sm_set_state(lps, bfa_lps_sm_logout);
                bfa_lps_send_logout(lps);
                break;

        case BFA_LPS_SM_OFFLINE:
        case BFA_LPS_SM_DELETE:
                /* abandon the logout: cancel the queued wait element */
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                bfa_reqq_wcancel(&lps->wqe);
                break;

        default:
                bfa_sm_fault(lps->bfa, event);
        }
}
1497
1498
1499
1500/*
1501 *  lps_pvt BFA LPS private functions
1502 */
1503
1504/*
1505 * return memory requirement
1506 */
1507static void
1508bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1509                struct bfa_s *bfa)
1510{
1511        struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1512
1513        if (cfg->drvcfg.min_cfg)
1514                bfa_mem_kva_setup(minfo, lps_kva,
1515                        sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
1516        else
1517                bfa_mem_kva_setup(minfo, lps_kva,
1518                        sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
1519}
1520
1521/*
1522 * bfa module attach at initialization time
1523 */
1524static void
1525bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1526        struct bfa_pcidev_s *pcidev)
1527{
1528        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1529        struct bfa_lps_s        *lps;
1530        int                     i;
1531
1532        mod->num_lps = BFA_LPS_MAX_LPORTS;
1533        if (cfg->drvcfg.min_cfg)
1534                mod->num_lps = BFA_LPS_MIN_LPORTS;
1535        else
1536                mod->num_lps = BFA_LPS_MAX_LPORTS;
1537        mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
1538
1539        bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
1540
1541        INIT_LIST_HEAD(&mod->lps_free_q);
1542        INIT_LIST_HEAD(&mod->lps_active_q);
1543        INIT_LIST_HEAD(&mod->lps_login_q);
1544
1545        for (i = 0; i < mod->num_lps; i++, lps++) {
1546                lps->bfa        = bfa;
1547                lps->bfa_tag    = (u8) i;
1548                lps->reqq       = BFA_REQQ_LPS;
1549                bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1550                list_add_tail(&lps->qe, &mod->lps_free_q);
1551        }
1552}
1553
/* Module-interface stub: LPS has no per-module teardown work. */
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}
1558
/* Module-interface stub: LPS needs no action at start. */
static void
bfa_lps_start(struct bfa_s *bfa)
{
}
1563
/* Module-interface stub: LPS needs no action at stop. */
static void
bfa_lps_stop(struct bfa_s *bfa)
{
}
1568
1569/*
1570 * IOC in disabled state -- consider all lps offline
1571 */
static void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
        struct bfa_lps_s        *lps;
        struct list_head                *qe, *qen;

        /* safe iteration: OFFLINE handling may unlink entries */
        list_for_each_safe(qe, qen, &mod->lps_active_q) {
                lps = (struct bfa_lps_s *) qe;
                bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
        }
        list_for_each_safe(qe, qen, &mod->lps_login_q) {
                lps = (struct bfa_lps_s *) qe;
                bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
        }
        /* any lps still on the login queue go back to the active queue */
        list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
}
1589
1590/*
1591 * Firmware login response
1592 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
        struct bfa_lps_s        *lps;

        WARN_ON(rsp->bfa_tag >= mod->num_lps);
        lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

        /* record overall status first; the state machine inspects it */
        lps->status = rsp->status;
        switch (rsp->status) {
        case BFA_STATUS_OK:
                /* capture the login parameters granted by the fabric */
                lps->fw_tag     = rsp->fw_tag;
                lps->fport      = rsp->f_port;
                /* lp_pid is only taken from the response on F-port login */
                if (lps->fport)
                        lps->lp_pid = rsp->lp_pid;
                lps->npiv_en    = rsp->npiv_en;
                lps->pr_bbcred  = be16_to_cpu(rsp->bb_credit);
                lps->pr_pwwn    = rsp->port_name;
                lps->pr_nwwn    = rsp->node_name;
                lps->auth_req   = rsp->auth_req;
                lps->lp_mac     = rsp->lp_mac;
                lps->brcd_switch = rsp->brcd_switch;
                lps->fcf_mac    = rsp->fcf_mac;
                lps->pr_bbscn   = rsp->bb_scn;

                break;

        case BFA_STATUS_FABRIC_RJT:
                /* preserve the LS_RJT reason/explanation for the caller */
                lps->lsrjt_rsn = rsp->lsrjt_rsn;
                lps->lsrjt_expl = rsp->lsrjt_expl;

                break;

        case BFA_STATUS_EPROTOCOL:
                lps->ext_status = rsp->ext_status;

                break;

        case BFA_STATUS_VPORT_MAX:
                /* ext_status = number of queued logins to fail as well */
                if (rsp->ext_status)
                        bfa_lps_no_res(lps, rsp->ext_status);
                break;

        default:
                /* Nothing to do with other status */
                break;
        }

        /* move from the login queue back to the active queue */
        list_del(&lps->qe);
        list_add_tail(&lps->qe, &mod->lps_active_q);
        bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
1646
1647static void
1648bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
1649{
1650        struct bfa_s            *bfa = first_lps->bfa;
1651        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1652        struct list_head        *qe, *qe_next;
1653        struct bfa_lps_s        *lps;
1654
1655        bfa_trc(bfa, count);
1656
1657        qe = bfa_q_next(first_lps);
1658
1659        while (count && qe) {
1660                qe_next = bfa_q_next(qe);
1661                lps = (struct bfa_lps_s *)qe;
1662                bfa_trc(bfa, lps->bfa_tag);
1663                lps->status = first_lps->status;
1664                list_del(&lps->qe);
1665                list_add_tail(&lps->qe, &mod->lps_active_q);
1666                bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1667                qe = qe_next;
1668                count--;
1669        }
1670}
1671
1672/*
1673 * Firmware logout response
1674 */
1675static void
1676bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1677{
1678        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1679        struct bfa_lps_s        *lps;
1680
1681        WARN_ON(rsp->bfa_tag >= mod->num_lps);
1682        lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1683
1684        bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1685}
1686
1687/*
1688 * Firmware received a Clear virtual link request (for FCoE)
1689 */
1690static void
1691bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1692{
1693        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1694        struct bfa_lps_s        *lps;
1695
1696        lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
1697
1698        bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1699}
1700
1701/*
1702 * Space is available in request queue, resume queueing request to firmware.
1703 */
1704static void
1705bfa_lps_reqq_resume(void *lps_arg)
1706{
1707        struct bfa_lps_s        *lps = lps_arg;
1708
1709        bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1710}
1711
1712/*
1713 * lps is freed -- triggered by vport delete
1714 */
1715static void
1716bfa_lps_free(struct bfa_lps_s *lps)
1717{
1718        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(lps->bfa);
1719
1720        lps->lp_pid = 0;
1721        list_del(&lps->qe);
1722        list_add_tail(&lps->qe, &mod->lps_free_q);
1723}
1724
1725/*
1726 * send login request to firmware
1727 */
1728static void
1729bfa_lps_send_login(struct bfa_lps_s *lps)
1730{
1731        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(lps->bfa);
1732        struct bfi_lps_login_req_s      *m;
1733
1734        m = bfa_reqq_next(lps->bfa, lps->reqq);
1735        WARN_ON(!m);
1736
1737        bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1738                bfa_fn_lpu(lps->bfa));
1739
1740        m->bfa_tag      = lps->bfa_tag;
1741        m->alpa         = lps->alpa;
1742        m->pdu_size     = cpu_to_be16(lps->pdusz);
1743        m->pwwn         = lps->pwwn;
1744        m->nwwn         = lps->nwwn;
1745        m->fdisc        = lps->fdisc;
1746        m->auth_en      = lps->auth_en;
1747        m->bb_scn       = lps->bb_scn;
1748
1749        bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1750        list_del(&lps->qe);
1751        list_add_tail(&lps->qe, &mod->lps_login_q);
1752}
1753
1754/*
1755 * send logout request to firmware
1756 */
1757static void
1758bfa_lps_send_logout(struct bfa_lps_s *lps)
1759{
1760        struct bfi_lps_logout_req_s *m;
1761
1762        m = bfa_reqq_next(lps->bfa, lps->reqq);
1763        WARN_ON(!m);
1764
1765        bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1766                bfa_fn_lpu(lps->bfa));
1767
1768        m->fw_tag = lps->fw_tag;
1769        m->port_name = lps->pwwn;
1770        bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1771}
1772
1773/*
1774 * send n2n pid set request to firmware
1775 */
1776static void
1777bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
1778{
1779        struct bfi_lps_n2n_pid_req_s *m;
1780
1781        m = bfa_reqq_next(lps->bfa, lps->reqq);
1782        WARN_ON(!m);
1783
1784        bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
1785                bfa_fn_lpu(lps->bfa));
1786
1787        m->fw_tag = lps->fw_tag;
1788        m->lp_pid = lps->lp_pid;
1789        bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1790}
1791
1792/*
1793 * Indirect login completion handler for non-fcs
1794 */
1795static void
1796bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1797{
1798        struct bfa_lps_s *lps   = arg;
1799
1800        if (!complete)
1801                return;
1802
1803        if (lps->fdisc)
1804                bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1805        else
1806                bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1807}
1808
1809/*
1810 * Login completion handler -- direct call for fcs, queue for others
1811 */
1812static void
1813bfa_lps_login_comp(struct bfa_lps_s *lps)
1814{
1815        if (!lps->bfa->fcs) {
1816                bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1817                        lps);
1818                return;
1819        }
1820
1821        if (lps->fdisc)
1822                bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1823        else
1824                bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1825}
1826
1827/*
1828 * Indirect logout completion handler for non-fcs
1829 */
1830static void
1831bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1832{
1833        struct bfa_lps_s *lps   = arg;
1834
1835        if (!complete)
1836                return;
1837
1838        if (lps->fdisc)
1839                bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1840        else
1841                bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
1842}
1843
1844/*
1845 * Logout completion handler -- direct call for fcs, queue for others
1846 */
1847static void
1848bfa_lps_logout_comp(struct bfa_lps_s *lps)
1849{
1850        if (!lps->bfa->fcs) {
1851                bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1852                        lps);
1853                return;
1854        }
1855        if (lps->fdisc)
1856                bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1857}
1858
1859/*
1860 * Clear virtual link completion handler for non-fcs
1861 */
1862static void
1863bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1864{
1865        struct bfa_lps_s *lps   = arg;
1866
1867        if (!complete)
1868                return;
1869
1870        /* Clear virtual link to base port will result in link down */
1871        if (lps->fdisc)
1872                bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1873}
1874
1875/*
1876 * Received Clear virtual link event --direct call for fcs,
1877 * queue for others
1878 */
1879static void
1880bfa_lps_cvl_event(struct bfa_lps_s *lps)
1881{
1882        if (!lps->bfa->fcs) {
1883                bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1884                        lps);
1885                return;
1886        }
1887
1888        /* Clear virtual link to base port will result in link down */
1889        if (lps->fdisc)
1890                bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1891}
1892
1893
1894
1895/*
1896 *  lps_public BFA LPS public functions
1897 */
1898
1899u32
1900bfa_lps_get_max_vport(struct bfa_s *bfa)
1901{
1902        if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1903                return BFA_LPS_MAX_VPORTS_SUPP_CT;
1904        else
1905                return BFA_LPS_MAX_VPORTS_SUPP_CB;
1906}
1907
1908/*
1909 * Allocate a lport srvice tag.
1910 */
1911struct bfa_lps_s  *
1912bfa_lps_alloc(struct bfa_s *bfa)
1913{
1914        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1915        struct bfa_lps_s        *lps = NULL;
1916
1917        bfa_q_deq(&mod->lps_free_q, &lps);
1918
1919        if (lps == NULL)
1920                return NULL;
1921
1922        list_add_tail(&lps->qe, &mod->lps_active_q);
1923
1924        bfa_sm_set_state(lps, bfa_lps_sm_init);
1925        return lps;
1926}
1927
1928/*
1929 * Free lport service tag. This can be called anytime after an alloc.
1930 * No need to wait for any pending login/logout completions.
1931 */
1932void
1933bfa_lps_delete(struct bfa_lps_s *lps)
1934{
1935        bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1936}
1937
1938/*
1939 * Initiate a lport login.
1940 */
1941void
1942bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1943        wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
1944{
1945        lps->uarg       = uarg;
1946        lps->alpa       = alpa;
1947        lps->pdusz      = pdusz;
1948        lps->pwwn       = pwwn;
1949        lps->nwwn       = nwwn;
1950        lps->fdisc      = BFA_FALSE;
1951        lps->auth_en    = auth_en;
1952        lps->bb_scn     = bb_scn;
1953        bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1954}
1955
1956/*
1957 * Initiate a lport fdisc login.
1958 */
1959void
1960bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1961        wwn_t nwwn)
1962{
1963        lps->uarg       = uarg;
1964        lps->alpa       = 0;
1965        lps->pdusz      = pdusz;
1966        lps->pwwn       = pwwn;
1967        lps->nwwn       = nwwn;
1968        lps->fdisc      = BFA_TRUE;
1969        lps->auth_en    = BFA_FALSE;
1970        bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1971}
1972
1973
1974/*
1975 * Initiate a lport FDSIC logout.
1976 */
1977void
1978bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1979{
1980        bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1981}
1982
1983u8
1984bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1985{
1986        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1987
1988        return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1989}
1990
1991/*
1992 * Return lport services tag given the pid
1993 */
1994u8
1995bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1996{
1997        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1998        struct bfa_lps_s        *lps;
1999        int                     i;
2000
2001        for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
2002                if (lps->lp_pid == pid)
2003                        return lps->bfa_tag;
2004        }
2005
2006        /* Return base port tag anyway */
2007        return 0;
2008}
2009
2010
2011/*
2012 * return port id assigned to the base lport
2013 */
2014u32
2015bfa_lps_get_base_pid(struct bfa_s *bfa)
2016{
2017        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
2018
2019        return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
2020}
2021
2022/*
2023 * Set PID in case of n2n (which is assigned during PLOGI)
2024 */
2025void
2026bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
2027{
2028        bfa_trc(lps->bfa, lps->bfa_tag);
2029        bfa_trc(lps->bfa, n2n_pid);
2030
2031        lps->lp_pid = n2n_pid;
2032        bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
2033}
2034
2035/*
2036 * LPS firmware message class handler.
2037 */
2038void
2039bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2040{
2041        union bfi_lps_i2h_msg_u msg;
2042
2043        bfa_trc(bfa, m->mhdr.msg_id);
2044        msg.msg = m;
2045
2046        switch (m->mhdr.msg_id) {
2047        case BFI_LPS_I2H_LOGIN_RSP:
2048                bfa_lps_login_rsp(bfa, msg.login_rsp);
2049                break;
2050
2051        case BFI_LPS_I2H_LOGOUT_RSP:
2052                bfa_lps_logout_rsp(bfa, msg.logout_rsp);
2053                break;
2054
2055        case BFI_LPS_I2H_CVL_EVENT:
2056                bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
2057                break;
2058
2059        default:
2060                bfa_trc(bfa, m->mhdr.msg_id);
2061                WARN_ON(1);
2062        }
2063}
2064
/*
 * Post an asynchronous event notification (AEN) for a port event.
 *
 * Silently drops the event when no AEN entry is available (best-effort
 * notification; bfad_get_aen_entry may yield NULL).
 */
static void
bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	struct bfa_aen_entry_s	*aen_entry;

	/* NOTE: bfad_get_aen_entry is a macro that assigns aen_entry. */
	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
	aen_entry->aen_data.port.pwwn = fcport->pwwn;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
				  BFA_AEN_CAT_PORT, event);
}
2082
2083/*
2084 * FC PORT state machine functions
2085 */
2086static void
2087bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2088                        enum bfa_fcport_sm_event event)
2089{
2090        bfa_trc(fcport->bfa, event);
2091
2092        switch (event) {
2093        case BFA_FCPORT_SM_START:
2094                /*
2095                 * Start event after IOC is configured and BFA is started.
2096                 */
2097                fcport->use_flash_cfg = BFA_TRUE;
2098
2099                if (bfa_fcport_send_enable(fcport)) {
2100                        bfa_trc(fcport->bfa, BFA_TRUE);
2101                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2102                } else {
2103                        bfa_trc(fcport->bfa, BFA_FALSE);
2104                        bfa_sm_set_state(fcport,
2105                                        bfa_fcport_sm_enabling_qwait);
2106                }
2107                break;
2108
2109        case BFA_FCPORT_SM_ENABLE:
2110                /*
2111                 * Port is persistently configured to be in enabled state. Do
2112                 * not change state. Port enabling is done when START event is
2113                 * received.
2114                 */
2115                break;
2116
2117        case BFA_FCPORT_SM_DISABLE:
2118                /*
2119                 * If a port is persistently configured to be disabled, the
2120                 * first event will a port disable request.
2121                 */
2122                bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2123                break;
2124
2125        case BFA_FCPORT_SM_HWFAIL:
2126                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2127                break;
2128
2129        default:
2130                bfa_sm_fault(fcport->bfa, event);
2131        }
2132}
2133
/*
 * Enable requested but the request queue was full: wait for QRESUME to
 * actually post the enable to firmware.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: send the pending enable now. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2197
/*
 * Enable request posted to firmware: waiting for the firmware response
 * or a link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* Enable acknowledged; link not up yet. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		WARN_ON(!fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Queue full -> wait for space before disabling. */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2259
/*
 * Port is enabled but the link is down: waiting for LINKUP from
 * firmware, or enable/disable/stop transitions.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		WARN_ON(!fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		/* FCoE mode: record the FIP FCF discovery outcome. */
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);

		/* If QoS is enabled and it is not online, send AEN */
		if (fcport->cfg.qos_enabled &&
		    fcport->qos_attr.state != BFA_QOS_ONLINE)
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Queue full -> wait for space before disabling. */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2351
/*
 * Port is enabled and the link is up: the steady operational state.
 * Handles transitions out via disable, link down, stop and HW failure;
 * each exit path resets link info and emits the appropriate logs/AENs.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Queue full -> wait for space before disabling. */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* Disabling an online port: report both offline and disable. */
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* Unrequested link down on an enabled port is an error. */
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2450
/*
 * Disable requested but the request queue was full: wait for QRESUME to
 * post the disable; an ENABLE while waiting moves to toggling_qwait.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: send the pending disable now. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable while a disable is pending: disable then re-enable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2501
/*
 * A disable followed by an enable arrived while waiting for queue space:
 * on QRESUME send the disable, then immediately issue the enable.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		/* Follow the disable with the deferred enable. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable already pending. */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Drop the pending enable; go back to plain disable wait. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2548
/*
 * Disable request posted to firmware: waiting for the firmware response;
 * an ENABLE here re-enables the port immediately.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Queue full -> wait for space before enabling. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2603
/*
 * Port is disabled; waiting for an enable/start (or dport enable) request.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
                                                enum bfa_fcport_sm_event event)
{
        char pwwn_buf[BFA_STRING_32];
        struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_START:
                /*
                 * Ignore start event for a port that is disabled.
                 */
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_ENABLE:
                /*
                 * Send the enable to firmware (or wait for request-queue
                 * space), then log and post an AEN for the enable.
                 */
                if (bfa_fcport_send_enable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                else
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_enabling_qwait);

                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                                BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
                wwn2str(pwwn_buf, fcport->pwwn);
                BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                        "Base port enabled: WWN = %s\n", pwwn_buf);
                bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
                break;

        case BFA_FCPORT_SM_DISABLE:
                /*
                 * Already disabled.
                 */
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
                break;

        case BFA_FCPORT_SM_DPORTENABLE:
                /* Switch into diagnostic-port mode. */
                bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2656
/*
 * Port has been stopped; only a START (fresh firmware enable) is honored.
 */
static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
                         enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_START:
                /* Re-enable the port via firmware, waiting for queue space
                 * if necessary. */
                if (bfa_fcport_send_enable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                else
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_enabling_qwait);
                break;

        default:
                /*
                 * Ignore all other events.
                 */
                ;
        }
}
2679
2680/*
2681 * Port is enabled. IOC is down/failed.
2682 */
static void
bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
                         enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_START:
                /* IOC came back: re-enable the port via firmware, waiting
                 * for request-queue space if necessary. */
                if (bfa_fcport_send_enable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
                else
                        bfa_sm_set_state(fcport,
                                         bfa_fcport_sm_enabling_qwait);
                break;

        default:
                /*
                 * Ignore all events.
                 */
                ;
        }
}
2705
2706/*
2707 * Port is disabled. IOC is down/failed.
2708 */
static void
bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
                         enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_START:
                /* IOC recovered, but the port stays administratively
                 * disabled. */
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
                break;

        case BFA_FCPORT_SM_ENABLE:
                /* Remember the enable intent; actual enable happens when
                 * the IOC comes back (see bfa_fcport_sm_iocdown). */
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
                break;

        default:
                /*
                 * Ignore all events.
                 */
                ;
        }
}
2731
/*
 * Port is in diagnostic (D-Port) mode.
 */
static void
bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_DPORTENABLE:
        case BFA_FCPORT_SM_DISABLE:
        case BFA_FCPORT_SM_ENABLE:
        case BFA_FCPORT_SM_START:
                /*
                 * Ignore event for a port that is dport
                 */
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_HWFAIL:
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
                break;

        case BFA_FCPORT_SM_DPORTDISABLE:
                /* Leaving dport mode returns the port to disabled. */
                bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2763
/*
 * Port is held in FAA-misconfigured state; only disable/stop/hwfail
 * transitions are acted upon.
 */
static void
bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
                            enum bfa_fcport_sm_event event)
{
        bfa_trc(fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_SM_DPORTENABLE:
        case BFA_FCPORT_SM_ENABLE:
        case BFA_FCPORT_SM_START:
                /*
                 * Ignore event for a port as there is FAA misconfig
                 */
                break;

        case BFA_FCPORT_SM_DISABLE:
                /* Send the disable to firmware (or wait for queue space),
                 * tear down link state, and notify upper layers. */
                if (bfa_fcport_send_disable(fcport))
                        bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
                else
                        bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);

                bfa_fcport_reset_linkinfo(fcport);
                bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                             BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
                bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
                break;

        case BFA_FCPORT_SM_STOP:
                bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
                break;

        case BFA_FCPORT_SM_HWFAIL:
                /* IOC failed: clear link info, notify linkdown, and mark
                 * the port enabled-but-IOC-down. */
                bfa_fcport_reset_linkinfo(fcport);
                bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
                bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
                break;

        default:
                bfa_sm_fault(fcport->bfa, event);
        }
}
2806
2807/*
2808 * Link state is down
2809 */
static void
bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
                enum bfa_fcport_ln_sm_event event)
{
        bfa_trc(ln->fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_LN_SM_LINKUP:
                /* Queue a LINKUP callback and wait for its notification. */
                bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
                bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
                break;

        default:
                bfa_sm_fault(ln->fcport->bfa, event);
        }
}
2826
2827/*
2828 * Link state is waiting for down notification
2829 */
static void
bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
                enum bfa_fcport_ln_sm_event event)
{
        bfa_trc(ln->fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_LN_SM_LINKUP:
                /* A link-up arrived before the down notification finished;
                 * remember it as pending. */
                bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
                break;

        case BFA_FCPORT_LN_SM_NOTIFICATION:
                /* Down notification delivered; link is now quiescent down. */
                bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
                break;

        default:
                bfa_sm_fault(ln->fcport->bfa, event);
        }
}
2849
2850/*
2851 * Link state is waiting for down notification and there is a pending up
2852 */
static void
bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
                enum bfa_fcport_ln_sm_event event)
{
        bfa_trc(ln->fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_LN_SM_LINKDOWN:
                /* The pending up was cancelled by a new down; keep waiting
                 * for the down notification only. */
                bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
                break;

        case BFA_FCPORT_LN_SM_NOTIFICATION:
                /* Down delivered; now issue the queued LINKUP callback. */
                bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
                bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
                break;

        default:
                bfa_sm_fault(ln->fcport->bfa, event);
        }
}
2873
2874/*
2875 * Link state is up
2876 */
static void
bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
                enum bfa_fcport_ln_sm_event event)
{
        bfa_trc(ln->fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_LN_SM_LINKDOWN:
                /* Queue a LINKDOWN callback and wait for its notification. */
                bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
                bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
                break;

        default:
                bfa_sm_fault(ln->fcport->bfa, event);
        }
}
2893
2894/*
2895 * Link state is waiting for up notification
2896 */
static void
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
                enum bfa_fcport_ln_sm_event event)
{
        bfa_trc(ln->fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_LN_SM_LINKDOWN:
                /* A link-down arrived before the up notification finished;
                 * remember it as pending. */
                bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
                break;

        case BFA_FCPORT_LN_SM_NOTIFICATION:
                /* Up notification delivered; link is now quiescent up. */
                bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
                break;

        default:
                bfa_sm_fault(ln->fcport->bfa, event);
        }
}
2916
2917/*
2918 * Link state is waiting for up notification and there is a pending down
2919 */
static void
bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
                enum bfa_fcport_ln_sm_event event)
{
        bfa_trc(ln->fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_LN_SM_LINKUP:
                /* Another up arrived; now both a down and an up are
                 * pending behind the in-flight up notification. */
                bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
                break;

        case BFA_FCPORT_LN_SM_NOTIFICATION:
                /* Up delivered; now issue the queued LINKDOWN callback. */
                bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
                bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
                break;

        default:
                bfa_sm_fault(ln->fcport->bfa, event);
        }
}
2940
2941/*
2942 * Link state is waiting for up notification and there are pending down and up
2943 */
static void
bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
                        enum bfa_fcport_ln_sm_event event)
{
        bfa_trc(ln->fcport->bfa, event);

        switch (event) {
        case BFA_FCPORT_LN_SM_LINKDOWN:
                /* The pending trailing up was cancelled by a new down. */
                bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
                break;

        case BFA_FCPORT_LN_SM_NOTIFICATION:
                /* Up delivered; issue the queued LINKDOWN, with the final
                 * up still pending behind it. */
                bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
                bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
                break;

        default:
                bfa_sm_fault(ln->fcport->bfa, event);
        }
}
2964
2965static void
2966__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2967{
2968        struct bfa_fcport_ln_s *ln = cbarg;
2969
2970        if (complete)
2971                ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2972        else
2973                bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2974}
2975
2976/*
2977 * Send SCN notification to upper layers.
2978 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2979 */
2980static void
2981bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2982        bfa_boolean_t trunk)
2983{
2984        if (fcport->cfg.trunked && !trunk)
2985                return;
2986
2987        switch (event) {
2988        case BFA_PORT_LINKUP:
2989                bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2990                break;
2991        case BFA_PORT_LINKDOWN:
2992                bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2993                break;
2994        default:
2995                WARN_ON(1);
2996        }
2997}
2998
2999static void
3000bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
3001{
3002        struct bfa_fcport_s *fcport = ln->fcport;
3003
3004        if (fcport->bfa->fcs) {
3005                fcport->event_cbfn(fcport->event_cbarg, event);
3006                bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
3007        } else {
3008                ln->ln_event = event;
3009                bfa_cb_queue(fcport->bfa, &ln->ln_qe,
3010                        __bfa_cb_fcport_event, ln);
3011        }
3012}
3013
3014#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
3015                                                        BFA_CACHELINE_SZ))
3016
3017static void
3018bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
3019                   struct bfa_s *bfa)
3020{
3021        struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
3022
3023        bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
3024}
3025
3026static void
3027bfa_fcport_qresume(void *cbarg)
3028{
3029        struct bfa_fcport_s *fcport = cbarg;
3030
3031        bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
3032}
3033
3034static void
3035bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
3036{
3037        struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
3038
3039        fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
3040        fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
3041        fcport->stats = (union bfa_fcport_stats_u *)
3042                                bfa_mem_dma_virt(fcport_dma);
3043}
3044
3045/*
3046 * Memory initialization.
3047 */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
        struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
        struct bfa_fcport_ln_s *ln = &fcport->ln;
        struct timeval tv;

        fcport->bfa = bfa;
        ln->fcport = fcport;

        /* Claim the stats DMA buffer reserved by bfa_fcport_meminfo(). */
        bfa_fcport_mem_claim(fcport);

        /* Put both state machines into their initial states. */
        bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
        bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

        /*
         * initialize time stamp for stats reset
         */
        do_gettimeofday(&tv);
        fcport->stats_reset_time = tv.tv_sec;
        fcport->stats_dma_ready = BFA_FALSE;

        /*
         * initialize and set default configuration
         */
        port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
        port_cfg->speed = BFA_PORT_SPEED_AUTO;
        port_cfg->trunked = BFA_FALSE;
        port_cfg->maxfrsize = 0;

        port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
        port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
        port_cfg->qos_bw.med = BFA_QOS_BW_MED;
        port_cfg->qos_bw.low = BFA_QOS_BW_LOW;

        INIT_LIST_HEAD(&fcport->stats_pending_q);
        INIT_LIST_HEAD(&fcport->statsclr_pending_q);

        /* Wait element used when the request queue is full (see
         * bfa_fcport_qresume). */
        bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
3090
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
        /* Intentionally empty - no fcport teardown work at detach. */
}
3095
3096/*
3097 * Called when IOC is ready.
3098 */
static void
bfa_fcport_start(struct bfa_s *bfa)
{
        /* Kick the port state machine now that the IOC is operational. */
        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
3104
3105/*
3106 * Called before IOC is stopped.
3107 */
static void
bfa_fcport_stop(struct bfa_s *bfa)
{
        /* Stop the port state machine, then clear trunk link state. */
        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
        bfa_trunk_iocdisable(bfa);
}
3114
3115/*
3116 * Called when IOC failure is detected.
3117 */
3118static void
3119bfa_fcport_iocdisable(struct bfa_s *bfa)
3120{
3121        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3122
3123        bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
3124        bfa_trunk_iocdisable(bfa);
3125}
3126
3127/*
3128 * Update loop info in fcport for SCN online
3129 */
3130static void
3131bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
3132                        struct bfa_fcport_loop_info_s *loop_info)
3133{
3134        fcport->myalpa = loop_info->myalpa;
3135        fcport->alpabm_valid =
3136                        loop_info->alpabm_val;
3137        memcpy(fcport->alpabm.alpa_bm,
3138                        loop_info->alpabm.alpa_bm,
3139                        sizeof(struct fc_alpabm_s));
3140}
3141
/*
 * Cache link attributes from the firmware link-state event: speed,
 * topology, and (for non-loop topologies) QoS, trunk and FCoE details.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
        struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
        struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

        fcport->speed = pevent->link_state.speed;
        fcport->topology = pevent->link_state.topology;

        /* Loop topology carries only loop info - nothing further to do. */
        if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
                bfa_fcport_update_loop_info(fcport,
                                &pevent->link_state.attr.loop_info);
                return;
        }

        /* QoS Details */
        fcport->qos_attr = pevent->link_state.qos_attr;
        fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;

        /*
         * update trunk state if applicable
         */
        if (!fcport->cfg.trunked)
                trunk->attr.state = BFA_TRUNK_DISABLED;

        /* update FCoE specific */
        fcport->fcoe_vlan =
                be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);

        bfa_trc(fcport->bfa, fcport->speed);
        bfa_trc(fcport->bfa, fcport->topology);
}
3174
3175static void
3176bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
3177{
3178        fcport->speed = BFA_PORT_SPEED_UNKNOWN;
3179        fcport->topology = BFA_PORT_TOPOLOGY_NONE;
3180        fcport->bbsc_op_state = BFA_FALSE;
3181}
3182
3183/*
3184 * Send port enable message to firmware.
3185 */
3186static bfa_boolean_t
3187bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3188{
3189        struct bfi_fcport_enable_req_s *m;
3190
3191        /*
3192         * Increment message tag before queue check, so that responses to old
3193         * requests are discarded.
3194         */
3195        fcport->msgtag++;
3196
3197        /*
3198         * check for room in queue to send request now
3199         */
3200        m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3201        if (!m) {
3202                bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3203                                                        &fcport->reqq_wait);
3204                return BFA_FALSE;
3205        }
3206
3207        bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
3208                        bfa_fn_lpu(fcport->bfa));
3209        m->nwwn = fcport->nwwn;
3210        m->pwwn = fcport->pwwn;
3211        m->port_cfg = fcport->cfg;
3212        m->msgtag = fcport->msgtag;
3213        m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
3214         m->use_flash_cfg = fcport->use_flash_cfg;
3215        bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3216        bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3217        bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3218
3219        /*
3220         * queue I/O message to firmware
3221         */
3222        bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3223        return BFA_TRUE;
3224}
3225
3226/*
3227 * Send port disable message to firmware.
3228 */
3229static  bfa_boolean_t
3230bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3231{
3232        struct bfi_fcport_req_s *m;
3233
3234        /*
3235         * Increment message tag before queue check, so that responses to old
3236         * requests are discarded.
3237         */
3238        fcport->msgtag++;
3239
3240        /*
3241         * check for room in queue to send request now
3242         */
3243        m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3244        if (!m) {
3245                bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3246                                                        &fcport->reqq_wait);
3247                return BFA_FALSE;
3248        }
3249
3250        bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
3251                        bfa_fn_lpu(fcport->bfa));
3252        m->msgtag = fcport->msgtag;
3253
3254        /*
3255         * queue I/O message to firmware
3256         */
3257        bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3258
3259        return BFA_TRUE;
3260}
3261
3262static void
3263bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3264{
3265        fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
3266        fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
3267
3268        bfa_trc(fcport->bfa, fcport->pwwn);
3269        bfa_trc(fcport->bfa, fcport->nwwn);
3270}
3271
3272static void
3273bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3274        struct bfa_qos_stats_s *s)
3275{
3276        u32     *dip = (u32 *) d;
3277        __be32  *sip = (__be32 *) s;
3278        int             i;
3279
3280        /* Now swap the 32 bit fields */
3281        for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3282                dip[i] = be32_to_cpu(sip[i]);
3283}
3284
/*
 * Convert the FCoE statistics block from firmware byte order.  Words
 * are processed in pairs: on little-endian hosts the two 32-bit halves
 * of each pair are also exchanged (presumably because the fields are
 * 64-bit counters - TODO confirm against bfa_fcoe_stats_s layout).
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
        struct bfa_fcoe_stats_s *s)
{
        u32     *dip = (u32 *) d;
        __be32  *sip = (__be32 *) s;
        int             i;

        for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
             i = i + 2) {
#ifdef __BIG_ENDIAN
                dip[i] = be32_to_cpu(sip[i]);
                dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
                dip[i] = be32_to_cpu(sip[i + 1]);
                dip[i + 1] = be32_to_cpu(sip[i]);
#endif
        }
}
3304
/*
 * Completion callback for a stats-get request.  On completion, swap the
 * firmware stats into every waiter's buffer and complete the pending
 * queue with the request status; on flush (!complete), just reinitialize
 * the pending queue.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
        struct bfa_cb_pending_q_s *cb;
        struct list_head *qe, *qen;
        union bfa_fcport_stats_u *ret;

        if (complete) {
                struct timeval tv;
                /* tv is only read below under the same status check. */
                if (fcport->stats_status == BFA_STATUS_OK)
                        do_gettimeofday(&tv);

                list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
                        bfa_q_deq(&fcport->stats_pending_q, &qe);
                        cb = (struct bfa_cb_pending_q_s *)qe;
                        if (fcport->stats_status == BFA_STATUS_OK) {
                                ret = (union bfa_fcport_stats_u *)cb->data;
                                /* Swap FC QoS or FCoE stats */
                                if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
                                        bfa_fcport_qos_stats_swap(&ret->fcqos,
                                                        &fcport->stats->fcqos);
                                else {
                                        bfa_fcport_fcoe_stats_swap(&ret->fcoe,
                                                        &fcport->stats->fcoe);
                                        /* seconds since the last stats reset */
                                        ret->fcoe.secs_reset =
                                        tv.tv_sec - fcport->stats_reset_time;
                                }
                        }
                        bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
                                        fcport->stats_status);
                }
                fcport->stats_status = BFA_STATUS_OK;
        } else {
                INIT_LIST_HEAD(&fcport->stats_pending_q);
                fcport->stats_status = BFA_STATUS_OK;
        }
}
3343
3344static void
3345bfa_fcport_stats_get_timeout(void *cbarg)
3346{
3347        struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3348
3349        bfa_trc(fcport->bfa, fcport->stats_qfull);
3350
3351        if (fcport->stats_qfull) {
3352                bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3353                fcport->stats_qfull = BFA_FALSE;
3354        }
3355
3356        fcport->stats_status = BFA_STATUS_ETIMER;
3357        __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3358}
3359
/*
 * Post a stats-get request to firmware.  If the request queue is full,
 * arm a wait element so this function is re-invoked when space frees up.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
        struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
        struct bfi_fcport_req_s *msg;

        msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

        if (!msg) {
                /* Queue full - retry via the queue-resume wait element. */
                fcport->stats_qfull = BFA_TRUE;
                bfa_reqq_winit(&fcport->stats_reqq_wait,
                                bfa_fcport_send_stats_get, fcport);
                bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
                                &fcport->stats_reqq_wait);
                return;
        }
        fcport->stats_qfull = BFA_FALSE;

        memset(msg, 0, sizeof(struct bfi_fcport_req_s));
        bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
                        bfa_fn_lpu(fcport->bfa));
        bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3383
/*
 * Completion callback for a stats-clear request.  On completion, reset
 * the stats timestamp and complete all waiters with the request status;
 * on flush (!complete), just reinitialize the pending queue.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
        struct bfa_cb_pending_q_s *cb;
        struct list_head *qe, *qen;

        if (complete) {
                struct timeval tv;

                /*
                 * re-initialize time stamp for stats reset
                 */
                do_gettimeofday(&tv);
                fcport->stats_reset_time = tv.tv_sec;
                list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
                        bfa_q_deq(&fcport->statsclr_pending_q, &qe);
                        cb = (struct bfa_cb_pending_q_s *)qe;
                        bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
                                                fcport->stats_status);
                }
                fcport->stats_status = BFA_STATUS_OK;
        } else {
                INIT_LIST_HEAD(&fcport->statsclr_pending_q);
                fcport->stats_status = BFA_STATUS_OK;
        }
}
3411
3412static void
3413bfa_fcport_stats_clr_timeout(void *cbarg)
3414{
3415        struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3416
3417        bfa_trc(fcport->bfa, fcport->stats_qfull);
3418
3419        if (fcport->stats_qfull) {
3420                bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3421                fcport->stats_qfull = BFA_FALSE;
3422        }
3423
3424        fcport->stats_status = BFA_STATUS_ETIMER;
3425        __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3426}
3427
/*
 * Post a stats-clear request to firmware.  If the request queue is full,
 * arm a wait element so this function is re-invoked when space frees up.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
        struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
        struct bfi_fcport_req_s *msg;

        msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

        if (!msg) {
                /* Queue full - retry via the queue-resume wait element. */
                fcport->stats_qfull = BFA_TRUE;
                bfa_reqq_winit(&fcport->stats_reqq_wait,
                                bfa_fcport_send_stats_clear, fcport);
                bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
                                                &fcport->stats_reqq_wait);
                return;
        }
        fcport->stats_qfull = BFA_FALSE;

        memset(msg, 0, sizeof(struct bfi_fcport_req_s));
        bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
                        bfa_fn_lpu(fcport->bfa));
        bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3451
3452/*
3453 * Handle trunk SCN event from firmware.
3454 */
/*
 * Handle a trunk state-change notification from firmware: refresh the
 * cached trunk/link attributes, log which links are up, and notify the
 * upper layers if the overall trunk state changed.
 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
        struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
        struct bfi_fcport_trunk_link_s *tlink;
        struct bfa_trunk_link_attr_s *lattr;
        enum bfa_trunk_state state_prev;
        int i;
        int link_bm = 0;        /* bitmap of links reported UP */

        bfa_trc(fcport->bfa, fcport->cfg.trunked);
        WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
                   scn->trunk_state != BFA_TRUNK_OFFLINE);

        bfa_trc(fcport->bfa, trunk->attr.state);
        bfa_trc(fcport->bfa, scn->trunk_state);
        bfa_trc(fcport->bfa, scn->trunk_speed);

        /*
         * Save off new state for trunk attribute query
         */
        state_prev = trunk->attr.state;
        if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
                trunk->attr.state = scn->trunk_state;
        trunk->attr.speed = scn->trunk_speed;
        for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
                /* Copy per-link attributes from the firmware message. */
                lattr = &trunk->attr.link_attr[i];
                tlink = &scn->tlink[i];

                lattr->link_state = tlink->state;
                lattr->trunk_wwn  = tlink->trunk_wwn;
                lattr->fctl       = tlink->fctl;
                lattr->speed      = tlink->speed;
                lattr->deskew     = be32_to_cpu(tlink->deskew);

                if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
                        fcport->speed    = tlink->speed;
                        fcport->topology = BFA_PORT_TOPOLOGY_P2P;
                        link_bm |= 1 << i;
                }

                bfa_trc(fcport->bfa, lattr->link_state);
                bfa_trc(fcport->bfa, lattr->trunk_wwn);
                bfa_trc(fcport->bfa, lattr->fctl);
                bfa_trc(fcport->bfa, lattr->speed);
                bfa_trc(fcport->bfa, lattr->deskew);
        }

        /* Record which of the two trunk links came up in the port log. */
        switch (link_bm) {
        case 3:
                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                        BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
                break;
        case 2:
                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                        BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
                break;
        case 1:
                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                        BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
                break;
        default:
                bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
                        BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
        }

        /*
         * Notify upper layers if trunk state changed.
         */
        if ((state_prev != trunk->attr.state) ||
                (scn->trunk_state == BFA_TRUNK_OFFLINE)) {
                bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
                        BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
        }
}
3530
3531static void
3532bfa_trunk_iocdisable(struct bfa_s *bfa)
3533{
3534        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3535        int i = 0;
3536
3537        /*
3538         * In trunked mode, notify upper layers that link is down
3539         */
3540        if (fcport->cfg.trunked) {
3541                if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3542                        bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3543
3544                fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3545                fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3546                for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3547                        fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3548                        fcport->trunk.attr.link_attr[i].fctl =
3549                                                BFA_TRUNK_LINK_FCTL_NORMAL;
3550                        fcport->trunk.attr.link_attr[i].link_state =
3551                                                BFA_TRUNK_LINK_STATE_DN_LINKDN;
3552                        fcport->trunk.attr.link_attr[i].speed =
3553                                                BFA_PORT_SPEED_UNKNOWN;
3554                        fcport->trunk.attr.link_attr[i].deskew = 0;
3555                }
3556        }
3557}
3558
/*
 * Called to initialize port attributes
 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/*
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	/* Keep a max frame size that was already configured (e.g. loaded
	 * from flash by the enable response); otherwise take the IOC's. */
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	/* Record pre-boot (PBC) disablement in the port module. */
	if (bfa_fcport_is_pbcdisabled(bfa))
		bfa->modules.port.pbc_disabled = BFA_TRUE;

	/* The IOC must have supplied non-zero values for all of these. */
	WARN_ON(!fcport->cfg.maxfrsize);
	WARN_ON(!fcport->cfg.rx_bbcredit);
	WARN_ON(!fcport->speed_sup);
}
3583
/*
 * Firmware message handler.
 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	/* Stash the raw message so state machine handlers can reach it. */
	i2hmsg.msg = msg;
	fcport->event_arg.i2hmsg = i2hmsg;

	bfa_trc(bfa, msg->mhdr.msg_id);
	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		/* Drop stale responses: msgtag must match the request. */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {

			fcport->stats_dma_ready = BFA_TRUE;
			if (fcport->use_flash_cfg) {
				/*
				 * First enable after init: adopt the port
				 * configuration saved in flash. Multi-byte
				 * fields are byte-swapped between firmware
				 * and host order here.
				 */
				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
				fcport->cfg.maxfrsize =
					cpu_to_be16(fcport->cfg.maxfrsize);
				fcport->cfg.path_tov =
					cpu_to_be16(fcport->cfg.path_tov);
				fcport->cfg.q_depth =
					cpu_to_be16(fcport->cfg.q_depth);

				if (fcport->cfg.trunked)
					fcport->trunk.attr.state =
						BFA_TRUNK_OFFLINE;
				else
					fcport->trunk.attr.state =
						BFA_TRUNK_DISABLED;
				fcport->qos_attr.qos_bw =
					i2hmsg.penable_rsp->port_cfg.qos_bw;
				/* Flash config consumed only once. */
				fcport->use_flash_cfg = BFA_FALSE;
			}

			if (fcport->cfg.qos_enabled)
				fcport->qos_attr.state = BFA_QOS_OFFLINE;
			else
				fcport->qos_attr.state = BFA_QOS_DISABLED;

			fcport->qos_attr.qos_bw_op =
					i2hmsg.penable_rsp->port_cfg.qos_bw;

			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		}
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		/* Same msgtag filtering for the disable handshake. */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		/* Link state change: map linkdown reason codes to events. */
		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
		else {
			if (i2hmsg.event->link_state.linkstate_rsn ==
			    BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
				bfa_sm_send_event(fcport,
						  BFA_FCPORT_SM_FAA_MISCONFIG);
			else
				bfa_sm_send_event(fcport,
						  BFA_FCPORT_SM_LINKDOWN);
		}
		fcport->qos_attr.qos_bw_op =
				i2hmsg.event->link_state.qos_attr.qos_bw_op;
		break;

	case BFI_FCPORT_I2H_TRUNK_SCN:
		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (list_empty(&fcport->stats_pending_q) ||
		    (fcport->stats_status == BFA_STATUS_ETIMER))
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (list_empty(&fcport->statsclr_pending_q) ||
		    (fcport->stats_status == BFA_STATUS_ETIMER))
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
		break;

	case BFI_FCPORT_I2H_ENABLE_AEN:
		/* Asynchronous enable notification from firmware. */
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
		break;

	case BFI_FCPORT_I2H_DISABLE_AEN:
		/* Asynchronous disable notification from firmware. */
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
		break;

	default:
		/* Unknown message id indicates a driver/firmware mismatch. */
		WARN_ON(1);
	break;
	}
}
3700
3701/*
3702 * Registered callback for port events.
3703 */
3704void
3705bfa_fcport_event_register(struct bfa_s *bfa,
3706                                void (*cbfn) (void *cbarg,
3707                                enum bfa_port_linkstate event),
3708                                void *cbarg)
3709{
3710        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3711
3712        fcport->event_cbfn = cbfn;
3713        fcport->event_cbarg = cbarg;
3714}
3715
3716bfa_status_t
3717bfa_fcport_enable(struct bfa_s *bfa)
3718{
3719        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3720
3721        if (bfa_fcport_is_pbcdisabled(bfa))
3722                return BFA_STATUS_PBC;
3723
3724        if (bfa_ioc_is_disabled(&bfa->ioc))
3725                return BFA_STATUS_IOC_DISABLED;
3726
3727        if (fcport->diag_busy)
3728                return BFA_STATUS_DIAG_BUSY;
3729
3730        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3731        return BFA_STATUS_OK;
3732}
3733
3734bfa_status_t
3735bfa_fcport_disable(struct bfa_s *bfa)
3736{
3737        if (bfa_fcport_is_pbcdisabled(bfa))
3738                return BFA_STATUS_PBC;
3739
3740        if (bfa_ioc_is_disabled(&bfa->ioc))
3741                return BFA_STATUS_IOC_DISABLED;
3742
3743        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3744        return BFA_STATUS_OK;
3745}
3746
3747/* If PBC is disabled on port, return error */
3748bfa_status_t
3749bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3750{
3751        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3752        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3753        struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3754
3755        if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3756                bfa_trc(bfa, fcport->pwwn);
3757                return BFA_STATUS_PBC;
3758        }
3759        return BFA_STATUS_OK;
3760}
3761
3762/*
3763 * Configure port speed.
3764 */
3765bfa_status_t
3766bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3767{
3768        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3769
3770        bfa_trc(bfa, speed);
3771
3772        if (fcport->cfg.trunked == BFA_TRUE)
3773                return BFA_STATUS_TRUNK_ENABLED;
3774        if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
3775                        (speed == BFA_PORT_SPEED_16GBPS))
3776                return BFA_STATUS_UNSUPP_SPEED;
3777        if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3778                bfa_trc(bfa, fcport->speed_sup);
3779                return BFA_STATUS_UNSUPP_SPEED;
3780        }
3781
3782        /* Port speed entered needs to be checked */
3783        if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3784                /* For CT2, 1G is not supported */
3785                if ((speed == BFA_PORT_SPEED_1GBPS) &&
3786                    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3787                        return BFA_STATUS_UNSUPP_SPEED;
3788
3789                /* Already checked for Auto Speed and Max Speed supp */
3790                if (!(speed == BFA_PORT_SPEED_1GBPS ||
3791                      speed == BFA_PORT_SPEED_2GBPS ||
3792                      speed == BFA_PORT_SPEED_4GBPS ||
3793                      speed == BFA_PORT_SPEED_8GBPS ||
3794                      speed == BFA_PORT_SPEED_16GBPS ||
3795                      speed == BFA_PORT_SPEED_AUTO))
3796                        return BFA_STATUS_UNSUPP_SPEED;
3797        } else {
3798                if (speed != BFA_PORT_SPEED_10GBPS)
3799                        return BFA_STATUS_UNSUPP_SPEED;
3800        }
3801
3802        fcport->cfg.speed = speed;
3803
3804        return BFA_STATUS_OK;
3805}
3806
3807/*
3808 * Get current speed.
3809 */
3810enum bfa_port_speed
3811bfa_fcport_get_speed(struct bfa_s *bfa)
3812{
3813        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3814
3815        return fcport->speed;
3816}
3817
3818/*
3819 * Configure port topology.
3820 */
3821bfa_status_t
3822bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3823{
3824        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3825
3826        bfa_trc(bfa, topology);
3827        bfa_trc(bfa, fcport->cfg.topology);
3828
3829        switch (topology) {
3830        case BFA_PORT_TOPOLOGY_P2P:
3831                break;
3832
3833        case BFA_PORT_TOPOLOGY_LOOP:
3834                if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
3835                        (fcport->qos_attr.state != BFA_QOS_DISABLED))
3836                        return BFA_STATUS_ERROR_QOS_ENABLED;
3837                if (fcport->cfg.ratelimit != BFA_FALSE)
3838                        return BFA_STATUS_ERROR_TRL_ENABLED;
3839                if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
3840                        (fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
3841                        return BFA_STATUS_ERROR_TRUNK_ENABLED;
3842                if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
3843                        (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
3844                        return BFA_STATUS_UNSUPP_SPEED;
3845                if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
3846                        return BFA_STATUS_LOOP_UNSUPP_MEZZ;
3847                if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
3848                        return BFA_STATUS_DPORT_ERR;
3849                break;
3850
3851        case BFA_PORT_TOPOLOGY_AUTO:
3852                break;
3853
3854        default:
3855                return BFA_STATUS_EINVAL;
3856        }
3857
3858        fcport->cfg.topology = topology;
3859        return BFA_STATUS_OK;
3860}
3861
3862/*
3863 * Get current topology.
3864 */
3865enum bfa_port_topology
3866bfa_fcport_get_topology(struct bfa_s *bfa)
3867{
3868        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3869
3870        return fcport->topology;
3871}
3872
3873/**
3874 * Get config topology.
3875 */
3876enum bfa_port_topology
3877bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
3878{
3879        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3880
3881        return fcport->cfg.topology;
3882}
3883
3884bfa_status_t
3885bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3886{
3887        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3888
3889        bfa_trc(bfa, alpa);
3890        bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3891        bfa_trc(bfa, fcport->cfg.hardalpa);
3892
3893        fcport->cfg.cfg_hardalpa = BFA_TRUE;
3894        fcport->cfg.hardalpa = alpa;
3895
3896        return BFA_STATUS_OK;
3897}
3898
3899bfa_status_t
3900bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3901{
3902        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3903
3904        bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3905        bfa_trc(bfa, fcport->cfg.hardalpa);
3906
3907        fcport->cfg.cfg_hardalpa = BFA_FALSE;
3908        return BFA_STATUS_OK;
3909}
3910
3911bfa_boolean_t
3912bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3913{
3914        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3915
3916        *alpa = fcport->cfg.hardalpa;
3917        return fcport->cfg.cfg_hardalpa;
3918}
3919
3920u8
3921bfa_fcport_get_myalpa(struct bfa_s *bfa)
3922{
3923        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3924
3925        return fcport->myalpa;
3926}
3927
3928bfa_status_t
3929bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3930{
3931        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3932
3933        bfa_trc(bfa, maxfrsize);
3934        bfa_trc(bfa, fcport->cfg.maxfrsize);
3935
3936        /* with in range */
3937        if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3938                return BFA_STATUS_INVLD_DFSZ;
3939
3940        /* power of 2, if not the max frame size of 2112 */
3941        if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3942                return BFA_STATUS_INVLD_DFSZ;
3943
3944        fcport->cfg.maxfrsize = maxfrsize;
3945        return BFA_STATUS_OK;
3946}
3947
3948u16
3949bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3950{
3951        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3952
3953        return fcport->cfg.maxfrsize;
3954}
3955
3956u8
3957bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3958{
3959        if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
3960                return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
3961
3962        else
3963                return 0;
3964}
3965
3966void
3967bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
3968{
3969        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3970
3971        fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3972        fcport->cfg.bb_scn = bb_scn;
3973        if (bb_scn)
3974                fcport->bbsc_op_state = BFA_TRUE;
3975}
3976
3977/*
3978 * Get port attributes.
3979 */
3980
3981wwn_t
3982bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3983{
3984        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3985        if (node)
3986                return fcport->nwwn;
3987        else
3988                return fcport->pwwn;
3989}
3990
/*
 * Fill @attr with a snapshot of the port's configured and operational
 * attributes for management queries.
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;

	/* Bulk-copy the port config, then override selected fields below. */
	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;

	/* path timeout and queue depth live in the FCP-IM module. */
	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	attr->bbsc_op_status =  fcport->bbsc_op_state;

	/* PBC Disabled State overrides the state machine's view. */
	if (bfa_fcport_is_pbcdisabled(bfa))
		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
	else {
		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_IOCDIS;
		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_FWMISMATCH;
	}

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
4039
4040#define BFA_FCPORT_STATS_TOV    1000
4041
4042/*
4043 * Fetch port statistics (FCQoS or FCoE).
4044 */
4045bfa_status_t
4046bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
4047{
4048        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4049
4050        if (!bfa_iocfc_is_operational(bfa) ||
4051            !fcport->stats_dma_ready)
4052                return BFA_STATUS_IOC_NON_OP;
4053
4054        if (!list_empty(&fcport->statsclr_pending_q))
4055                return BFA_STATUS_DEVBUSY;
4056
4057        if (list_empty(&fcport->stats_pending_q)) {
4058                list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4059                bfa_fcport_send_stats_get(fcport);
4060                bfa_timer_start(bfa, &fcport->timer,
4061                                bfa_fcport_stats_get_timeout,
4062                                fcport, BFA_FCPORT_STATS_TOV);
4063        } else
4064                list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4065
4066        return BFA_STATUS_OK;
4067}
4068
4069/*
4070 * Reset port statistics (FCQoS or FCoE).
4071 */
4072bfa_status_t
4073bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
4074{
4075        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4076
4077        if (!bfa_iocfc_is_operational(bfa) ||
4078            !fcport->stats_dma_ready)
4079                return BFA_STATUS_IOC_NON_OP;
4080
4081        if (!list_empty(&fcport->stats_pending_q))
4082                return BFA_STATUS_DEVBUSY;
4083
4084        if (list_empty(&fcport->statsclr_pending_q)) {
4085                list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4086                bfa_fcport_send_stats_clear(fcport);
4087                bfa_timer_start(bfa, &fcport->timer,
4088                                bfa_fcport_stats_clr_timeout,
4089                                fcport, BFA_FCPORT_STATS_TOV);
4090        } else
4091                list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4092
4093        return BFA_STATUS_OK;
4094}
4095
4096/*
4097 * Fetch port attributes.
4098 */
4099bfa_boolean_t
4100bfa_fcport_is_disabled(struct bfa_s *bfa)
4101{
4102        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4103
4104        return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4105                BFA_PORT_ST_DISABLED;
4106
4107}
4108
4109bfa_boolean_t
4110bfa_fcport_is_dport(struct bfa_s *bfa)
4111{
4112        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4113
4114        return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4115                BFA_PORT_ST_DPORT);
4116}
4117
4118bfa_status_t
4119bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
4120{
4121        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4122        enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
4123
4124        bfa_trc(bfa, ioc_type);
4125
4126        if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
4127                return BFA_STATUS_QOS_BW_INVALID;
4128
4129        if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
4130                return BFA_STATUS_QOS_BW_INVALID;
4131
4132        if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
4133            (qos_bw->low > qos_bw->high))
4134                return BFA_STATUS_QOS_BW_INVALID;
4135
4136        if ((ioc_type == BFA_IOC_TYPE_FC) &&
4137            (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
4138                fcport->cfg.qos_bw = *qos_bw;
4139
4140        return BFA_STATUS_OK;
4141}
4142
4143bfa_boolean_t
4144bfa_fcport_is_ratelim(struct bfa_s *bfa)
4145{
4146        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4147
4148        return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
4149
4150}
4151
4152/*
4153 *      Enable/Disable FAA feature in port config
4154 */
4155void
4156bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
4157{
4158        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4159
4160        bfa_trc(bfa, state);
4161        fcport->cfg.faa_state = state;
4162}
4163
4164/*
4165 * Get default minimum ratelim speed
4166 */
4167enum bfa_port_speed
4168bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
4169{
4170        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4171
4172        bfa_trc(bfa, fcport->cfg.trl_def_speed);
4173        return fcport->cfg.trl_def_speed;
4174
4175}
4176
4177void
4178bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4179                  bfa_boolean_t link_e2e_beacon)
4180{
4181        struct bfa_s *bfa = dev;
4182        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4183
4184        bfa_trc(bfa, beacon);
4185        bfa_trc(bfa, link_e2e_beacon);
4186        bfa_trc(bfa, fcport->beacon);
4187        bfa_trc(bfa, fcport->link_e2e_beacon);
4188
4189        fcport->beacon = beacon;
4190        fcport->link_e2e_beacon = link_e2e_beacon;
4191}
4192
4193bfa_boolean_t
4194bfa_fcport_is_linkup(struct bfa_s *bfa)
4195{
4196        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4197
4198        return  (!fcport->cfg.trunked &&
4199                 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4200                (fcport->cfg.trunked &&
4201                 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
4202}
4203
4204bfa_boolean_t
4205bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4206{
4207        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4208
4209        return fcport->cfg.qos_enabled;
4210}
4211
4212bfa_boolean_t
4213bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
4214{
4215        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4216
4217        return fcport->cfg.trunked;
4218}
4219
4220void
4221bfa_fcport_dportenable(struct bfa_s *bfa)
4222{
4223        /*
4224         * Assume caller check for port is in disable state
4225         */
4226        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
4227        bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
4228}
4229
4230void
4231bfa_fcport_dportdisable(struct bfa_s *bfa)
4232{
4233        /*
4234         * Assume caller check for port is in disable state
4235         */
4236        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
4237        bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
4238}
4239
/*
 * Rport State machine functions
 */
/*
 * Beginning state, only online event expected.
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		/* uninit -> created: rport object handed out to a user. */
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		/* Any other event in this state is a driver logic error. */
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4263
/*
 * Created state: rport allocated but not yet known to firmware.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* Send the firmware create; if the request queue is full,
		 * park in the qfull state until the queue resumes. */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Never reached firmware, so the rport can be freed now. */
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC failure: wait in iocdisable for recovery/cleanup. */
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4295
/*
 * Waiting for rport create response from firmware.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Firmware acknowledged the create: rport is online. */
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Defer the delete until the create response arrives. */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* Defer the offline until the create response arrives. */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC failure aborts the firmware handshake. */
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4332
/*
 * Request queue is full, awaiting queue resume to send create request.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* Queue space available: retry the firmware create. */
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Create was never sent; cancel the queue wait and free. */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* Create was never sent; just report offline. */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC failure: cancel the queue wait and park. */
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4373
/*
 * Online state - normal parking state.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* Delete the firmware rport; qfull state if queue is full. */
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Same firmware delete, but the rport is freed afterwards. */
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		/* Speed change is forwarded to firmware; no state change. */
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		/* QoS state change notification from firmware.
		 * NOTE(review): rp->qos_attr is copied before the flow_id
		 * byte-swap below, so it keeps the wire byte order for
		 * that field — confirm consumers expect this.
		 */
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		qos_scn->old_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		/* Notify upper layers only about actual changes. */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4441
4442/*
4443 * Firmware rport is being deleted - awaiting f/w response.
4444 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_FWRSP:
                /* f/w ack'ed the delete; rport is offline again */
                bfa_stats(rp, sm_fwd_rsp);
                bfa_sm_set_state(rp, bfa_rport_sm_offline);
                bfa_rport_offline_cb(rp);
                break;

        case BFA_RPORT_SM_DELETE:
                /*
                 * rport is now being deleted outright; reuse the pending
                 * f/w delete response, just land in the deleting state.
                 */
                bfa_stats(rp, sm_fwd_del);
                bfa_sm_set_state(rp, bfa_rport_sm_deleting);
                break;

        case BFA_RPORT_SM_HWFAIL:
                /* IOC failed; no f/w response will arrive */
                bfa_stats(rp, sm_fwd_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                bfa_rport_offline_cb(rp);
                break;

        default:
                bfa_stats(rp, sm_fwd_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4474
/*
 * Request queue was full when the f/w delete was attempted; waiting for
 * queue space to resend it.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_QRESUME:
                /* queue space available; send the delete now */
                bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
                bfa_rport_send_fwdelete(rp);
                break;

        case BFA_RPORT_SM_DELETE:
                bfa_stats(rp, sm_fwd_del);
                bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
                break;

        case BFA_RPORT_SM_HWFAIL:
                /* IOC failed; cancel the queue wait before going down */
                bfa_stats(rp, sm_fwd_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                bfa_reqq_wcancel(&rp->reqq_wait);
                bfa_rport_offline_cb(rp);
                break;

        default:
                bfa_stats(rp, sm_fwd_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4504
4505/*
4506 * Offline state.
4507 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_DELETE:
                /* no f/w state to tear down; free immediately */
                bfa_stats(rp, sm_off_del);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        case BFA_RPORT_SM_ONLINE:
                /* create the f/w rport; qfull state if no queue space */
                bfa_stats(rp, sm_off_on);
                if (bfa_rport_send_fwcreate(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_off_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                break;

        case BFA_RPORT_SM_OFFLINE:
                /* already offline; just re-run the offline callback */
                bfa_rport_offline_cb(rp);
                break;

        default:
                bfa_stats(rp, sm_off_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4543
4544/*
4545 * Rport is deleted, waiting for firmware response to delete.
4546 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_FWRSP:
                /* f/w delete completed; release the rport */
                bfa_stats(rp, sm_del_fwrsp);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        case BFA_RPORT_SM_HWFAIL:
                /* IOC failed; no response coming, release anyway */
                bfa_stats(rp, sm_del_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        default:
                bfa_sm_fault(rp->bfa, event);
        }
}
4570
/*
 * Rport is deleted, but the f/w delete could not be sent (request queue
 * full); waiting for queue space.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_QRESUME:
                /* queue space available; issue the f/w delete */
                bfa_stats(rp, sm_del_fwrsp);
                bfa_sm_set_state(rp, bfa_rport_sm_deleting);
                bfa_rport_send_fwdelete(rp);
                break;

        case BFA_RPORT_SM_HWFAIL:
                /* IOC failed; cancel the queue wait and release */
                bfa_stats(rp, sm_del_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_reqq_wcancel(&rp->reqq_wait);
                bfa_rport_free(rp);
                break;

        default:
                bfa_sm_fault(rp->bfa, event);
        }
}
4595
4596/*
4597 * Waiting for rport create response from firmware. A delete is pending.
4598 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
                                enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_FWRSP:
                /* create completed; now send the pending f/w delete */
                bfa_stats(rp, sm_delp_fwrsp);
                if (bfa_rport_send_fwdelete(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_deleting);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
                break;

        case BFA_RPORT_SM_HWFAIL:
                /* IOC failed; nothing left in f/w, release directly */
                bfa_stats(rp, sm_delp_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        default:
                bfa_stats(rp, sm_delp_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4626
4627/*
4628 * Waiting for rport create response from firmware. Rport offline is pending.
4629 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
                                 enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_FWRSP:
                /* create completed; undo it with a f/w delete */
                bfa_stats(rp, sm_offp_fwrsp);
                if (bfa_rport_send_fwdelete(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
                break;

        case BFA_RPORT_SM_DELETE:
                /* escalate the pending offline to a pending delete */
                bfa_stats(rp, sm_offp_del);
                bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
                break;

        case BFA_RPORT_SM_HWFAIL:
                bfa_stats(rp, sm_offp_hwf);
                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
                bfa_rport_offline_cb(rp);
                break;

        default:
                bfa_stats(rp, sm_offp_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4662
4663/*
4664 * IOC h/w failed.
4665 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
        bfa_trc(rp->bfa, rp->rport_tag);
        bfa_trc(rp->bfa, event);

        switch (event) {
        case BFA_RPORT_SM_OFFLINE:
                /* stay in iocdisable; just notify the driver */
                bfa_stats(rp, sm_iocd_off);
                bfa_rport_offline_cb(rp);
                break;

        case BFA_RPORT_SM_DELETE:
                /* nothing in f/w to clean up; release directly */
                bfa_stats(rp, sm_iocd_del);
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
                bfa_rport_free(rp);
                break;

        case BFA_RPORT_SM_ONLINE:
                /* IOC recovered; recreate the f/w rport */
                bfa_stats(rp, sm_iocd_on);
                if (bfa_rport_send_fwcreate(rp))
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
                else
                        bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
                break;

        case BFA_RPORT_SM_HWFAIL:
                /* already disabled; ignore repeated failures */
                break;

        default:
                bfa_stats(rp, sm_iocd_unexp);
                bfa_sm_fault(rp->bfa, event);
        }
}
4700
4701
4702
4703/*
4704 *  bfa_rport_private BFA rport private functions
4705 */
4706
4707static void
4708__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4709{
4710        struct bfa_rport_s *rp = cbarg;
4711
4712        if (complete)
4713                bfa_cb_rport_online(rp->rport_drv);
4714}
4715
4716static void
4717__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4718{
4719        struct bfa_rport_s *rp = cbarg;
4720
4721        if (complete)
4722                bfa_cb_rport_offline(rp->rport_drv);
4723}
4724
4725static void
4726bfa_rport_qresume(void *cbarg)
4727{
4728        struct bfa_rport_s      *rp = cbarg;
4729
4730        bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4731}
4732
/*
 * Report KVA memory required by the rport module: one bfa_rport_s per
 * configured rport (clamped up to BFA_RPORT_MIN).
 */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
                struct bfa_s *bfa)
{
        struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);

        /* enforce the minimum rport count */
        if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
                cfg->fwcfg.num_rports = BFA_RPORT_MIN;

        /* kva memory */
        bfa_mem_kva_setup(minfo, rport_kva,
                cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
}
4746
/*
 * Module attach: carve the rport array out of the pre-claimed KVA block,
 * initialize every rport, and build the free queue.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_pcidev_s *pcidev)
{
        struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
        struct bfa_rport_s *rp;
        u16 i;

        INIT_LIST_HEAD(&mod->rp_free_q);
        INIT_LIST_HEAD(&mod->rp_active_q);
        INIT_LIST_HEAD(&mod->rp_unused_q);

        rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
        mod->rps_list = rp;
        mod->num_rports = cfg->fwcfg.num_rports;

        /* num_rports must be a non-zero power of two */
        WARN_ON(!mod->num_rports ||
                   (mod->num_rports & (mod->num_rports - 1)));

        for (i = 0; i < mod->num_rports; i++, rp++) {
                memset(rp, 0, sizeof(struct bfa_rport_s));
                rp->bfa = bfa;
                rp->rport_tag = i;
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);

                /*
                 * entry 0 is kept off the free queue, so tag 0 is
                 * never handed out by bfa_rport_alloc()
                 */
                if (i)
                        list_add_tail(&rp->qe, &mod->rp_free_q);

                bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
        }

        /*
         * consume memory: advance the module KVA cursor past the array
         */
        bfa_mem_kva_curp(mod) = (u8 *) rp;
}
4786
/* Module detach hook -- nothing to undo for the rport module. */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
4791
/* Module start hook -- no per-start work for the rport module. */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
4796
/* Module stop hook -- no per-stop work for the rport module. */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
4801
/*
 * IOC disable: return unused rports to the free queue and fail every
 * active rport via its state machine.
 */
static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
        struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
        struct bfa_rport_s *rport;
        struct list_head *qe, *qen;

        /* Enqueue unused rport resources to free_q */
        list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);

        /* safe iteration: HWFAIL handling may unlink the entry */
        list_for_each_safe(qe, qen, &mod->rp_active_q) {
                rport = (struct bfa_rport_s *) qe;
                bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
        }
}
4817
4818static struct bfa_rport_s *
4819bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4820{
4821        struct bfa_rport_s *rport;
4822
4823        bfa_q_deq(&mod->rp_free_q, &rport);
4824        if (rport)
4825                list_add_tail(&rport->qe, &mod->rp_active_q);
4826
4827        return rport;
4828}
4829
4830static void
4831bfa_rport_free(struct bfa_rport_s *rport)
4832{
4833        struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4834
4835        WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
4836        list_del(&rport->qe);
4837        list_add_tail(&rport->qe, &mod->rp_free_q);
4838}
4839
/*
 * Build and post a BFI_RPORT_H2I_CREATE_REQ to firmware.
 * Returns BFA_FALSE (and arms a queue-resume wait) when the request
 * queue is full; BFA_TRUE once the message is queued.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
        struct bfi_rport_create_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
        if (!m) {
                bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
                        bfa_fn_lpu(rp->bfa));
        m->bfa_handle = rp->rport_tag;
        /* max_frmsz is the only multi-byte field swapped to wire order */
        m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
        m->pid = rp->rport_info.pid;
        m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
        m->local_pid = rp->rport_info.local_pid;
        m->fc_class = rp->rport_info.fc_class;
        m->vf_en = rp->rport_info.vf_en;
        m->vf_id = rp->rport_info.vf_id;
        m->cisc = rp->rport_info.cisc;

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
        return BFA_TRUE;
}
4872
/*
 * Build and post a BFI_RPORT_H2I_DELETE_REQ to firmware.
 * Returns BFA_FALSE (and arms a queue-resume wait) when the request
 * queue is full; BFA_TRUE once the message is queued.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
        struct bfi_rport_delete_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
        if (!m) {
                bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
                        bfa_fn_lpu(rp->bfa));
        /* fw_handle was returned by the earlier create response */
        m->fw_handle = rp->fw_handle;

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
        return BFA_TRUE;
}
4897
/*
 * Build and post a BFI_RPORT_H2I_SET_SPEED_REQ to firmware.
 * Unlike create/delete, a full request queue is not retried: the speed
 * update is simply dropped (traced) and BFA_FALSE returned.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
        struct bfa_rport_speed_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
        if (!m) {
                bfa_trc(rp->bfa, rp->rport_info.speed);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
                        bfa_fn_lpu(rp->bfa));
        m->fw_handle = rp->fw_handle;
        m->speed = (u8)rp->rport_info.speed;

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
        return BFA_TRUE;
}
4923
4924
4925
4926/*
4927 *  bfa_rport_public
4928 */
4929
4930/*
4931 * Rport interrupt processing.
4932 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        union bfi_rport_i2h_msg_u msg;
        struct bfa_rport_s *rp;

        bfa_trc(bfa, m->mhdr.msg_id);

        msg.msg = m;

        switch (m->mhdr.msg_id) {
        case BFI_RPORT_I2H_CREATE_RSP:
                /* record the f/w handle and QoS attrs, then advance SM */
                rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
                rp->fw_handle = msg.create_rsp->fw_handle;
                rp->qos_attr = msg.create_rsp->qos_attr;
                bfa_rport_set_lunmask(bfa, rp);
                WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
                bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
                break;

        case BFI_RPORT_I2H_DELETE_RSP:
                rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
                WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
                bfa_rport_unset_lunmask(bfa, rp);
                bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
                break;

        case BFI_RPORT_I2H_QOS_SCN:
                /* stash the raw f/w message; the SM handler decodes it */
                rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
                rp->event_arg.fw_msg = msg.qos_scn_evt;
                bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
                break;

        case BFI_RPORT_I2H_LIP_SCN_ONLINE:
                bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
                                &msg.lip_scn->loop_info);
                bfa_cb_rport_scn_online(bfa);
                break;

        case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
                bfa_cb_rport_scn_offline(bfa);
                break;

        case BFI_RPORT_I2H_NO_DEV:
                rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
                bfa_cb_rport_scn_no_dev(rp->rport_drv);
                break;

        default:
                /* unknown message id from firmware */
                bfa_trc(bfa, m->mhdr.msg_id);
                WARN_ON(1);
        }
}
4986
4987void
4988bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
4989{
4990        struct bfa_rport_mod_s  *mod = BFA_RPORT_MOD(bfa);
4991        struct list_head        *qe;
4992        int     i;
4993
4994        for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
4995                bfa_q_deq_tail(&mod->rp_free_q, &qe);
4996                list_add_tail(qe, &mod->rp_unused_q);
4997        }
4998}
4999
5000/*
5001 *  bfa_rport_api
5002 */
5003
/*
 * Allocate an rport, bind it to the driver context and kick its state
 * machine with a CREATE event.  Returns NULL when no rport is free.
 */
struct bfa_rport_s *
bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
{
        struct bfa_rport_s *rp;

        rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));

        if (rp == NULL)
                return NULL;

        rp->bfa = bfa;
        rp->rport_drv = rport_drv;
        memset(&rp->stats, 0, sizeof(rp->stats));

        /* freshly allocated rports must be in the uninit state */
        WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
        bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);

        return rp;
}
5023
/*
 * Bring an rport online with the given login parameters and kick the
 * state machine.  A zero max_frmsz is warned about but then fixed up
 * (see below) rather than rejected.
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
        WARN_ON(rport_info->max_frmsz == 0);

        /*
         * Some JBODs are seen to be not setting PDU size correctly in PLOGI
         * responses. Default to minimum size.
         */
        if (rport_info->max_frmsz == 0) {
                bfa_trc(rport->bfa, rport->rport_tag);
                rport_info->max_frmsz = FC_MIN_PDUSZ;
        }

        /* struct copy: rport keeps its own snapshot of the login info */
        rport->rport_info = *rport_info;
        bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
5041
5042void
5043bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
5044{
5045        WARN_ON(speed == 0);
5046        WARN_ON(speed == BFA_PORT_SPEED_AUTO);
5047
5048        if (rport) {
5049                rport->rport_info.speed = speed;
5050                bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
5051        }
5052}
5053
/* Set Rport LUN Mask */
void
bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
        struct bfa_lps_mod_s    *lps_mod = BFA_LPS_MOD(bfa);
        wwn_t   lp_wwn, rp_wwn;
        u8 lp_tag = (u8)rp->rport_info.lp_tag;

        rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
        lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

        /* chained assignment: marks both the lport and the rport */
        BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
                                        rp->lun_mask = BFA_TRUE;
        bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
}
5069
/* Unset Rport LUN mask */
void
bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
        struct bfa_lps_mod_s    *lps_mod = BFA_LPS_MOD(bfa);
        wwn_t   lp_wwn, rp_wwn;

        rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
        lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

        /* chained assignment: clears both the lport and the rport flag */
        BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
                                rp->lun_mask = BFA_FALSE;
        /* invalid tags tell fcpim to drop the rport from the lun mask */
        bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
                        BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
}
5085
5086/*
5087 * SGPG related functions
5088 */
5089
5090/*
5091 * Compute and return memory needed by FCP(im) module.
5092 */
static void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
                struct bfa_s *bfa)
{
        struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
        struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
        struct bfa_mem_dma_s *seg_ptr;
        u16     nsegs, idx, per_seg_sgpg, num_sgpg;
        u32     sgpg_sz = sizeof(struct bfi_sgpg_s);

        /* clamp the configured SGPG count into [BFA_SGPG_MIN, BFA_SGPG_MAX] */
        if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
                cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
        else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
                cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;

        num_sgpg = cfg->drvcfg.num_sgpgs;

        nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
        per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);

        /* spread the DMA requirement over per-segment allocations */
        bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
                if (num_sgpg >= per_seg_sgpg) {
                        num_sgpg -= per_seg_sgpg;
                        bfa_mem_dma_setup(minfo, seg_ptr,
                                        per_seg_sgpg * sgpg_sz);
                } else
                        bfa_mem_dma_setup(minfo, seg_ptr,
                                        num_sgpg * sgpg_sz);
        }

        /* kva memory */
        bfa_mem_kva_setup(minfo, sgpg_kva,
                cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
}
5127
/*
 * Module attach: claim the DMA segments for the firmware SG pages and
 * the KVA array of host-side bfa_sgpg_s descriptors, pair them up, and
 * queue every descriptor on the free queue.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_pcidev_s *pcidev)
{
        struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
        struct bfa_sgpg_s *hsgpg;
        struct bfi_sgpg_s *sgpg;
        u64 align_len;
        struct bfa_mem_dma_s *seg_ptr;
        u32     sgpg_sz = sizeof(struct bfi_sgpg_s);
        u16     i, idx, nsegs, per_seg_sgpg, num_sgpg;

        /* union to view the same 64-bit PA as a bfi address */
        union {
                u64 pa;
                union bfi_addr_u addr;
        } sgpg_pa, sgpg_pa_tmp;

        INIT_LIST_HEAD(&mod->sgpg_q);
        INIT_LIST_HEAD(&mod->sgpg_wait_q);

        bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

        mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;

        num_sgpg = cfg->drvcfg.num_sgpgs;
        nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);

        /* dma/kva mem claim */
        hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);

        bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {

                if (!bfa_mem_dma_virt(seg_ptr))
                        break;

                /* round the segment base up to an SGPG boundary */
                align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
                                             bfa_mem_dma_phys(seg_ptr);

                sgpg = (struct bfi_sgpg_s *)
                        (((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
                sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
                WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));

                /* how many whole SGPGs fit in what is left of the segment */
                per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;

                for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
                        memset(hsgpg, 0, sizeof(*hsgpg));
                        memset(sgpg, 0, sizeof(*sgpg));

                        /* pair host descriptor with its DMA page */
                        hsgpg->sgpg = sgpg;
                        sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
                        hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
                        list_add_tail(&hsgpg->qe, &mod->sgpg_q);

                        sgpg++;
                        hsgpg++;
                        sgpg_pa.pa += sgpg_sz;
                }
        }

        /* advance the module KVA cursor past the descriptor array */
        bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
}
5190
/* Module detach hook -- nothing to undo for the SGPG module. */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
5195
/* Module start hook -- no per-start work for the SGPG module. */
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
5200
/* Module stop hook -- no per-stop work for the SGPG module. */
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
5205
/* IOC disable hook -- SGPG state needs no cleanup on IOC failure. */
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
5210
/*
 * Allocate nsgpgs SG pages onto the caller's sgpg_q.
 * All-or-nothing: returns BFA_STATUS_ENOMEM without taking any pages
 * when fewer than nsgpgs are free.
 */
bfa_status_t
bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
{
        struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
        struct bfa_sgpg_s *hsgpg;
        int i;

        if (mod->free_sgpgs < nsgpgs)
                return BFA_STATUS_ENOMEM;

        for (i = 0; i < nsgpgs; i++) {
                /* cannot be empty: free_sgpgs was checked above */
                bfa_q_deq(&mod->sgpg_q, &hsgpg);
                WARN_ON(!hsgpg);
                list_add_tail(&hsgpg->qe, sgpg_q);
        }

        mod->free_sgpgs -= nsgpgs;
        return BFA_STATUS_OK;
}
5230
/*
 * Return nsgpg SG pages from sgpg_q to the free pool, then hand out the
 * reclaimed pages to queued waiters, in order, possibly partially.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
        struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
        struct bfa_sgpg_wqe_s *wqe;

        mod->free_sgpgs += nsgpg;
        WARN_ON(mod->free_sgpgs > mod->num_sgpgs);

        list_splice_tail_init(sgpg_q, &mod->sgpg_q);

        if (list_empty(&mod->sgpg_wait_q))
                return;

        /*
         * satisfy as many waiting requests as possible
         */
        do {
                wqe = bfa_q_first(&mod->sgpg_wait_q);
                /* give the waiter everything free, capped at its need */
                if (mod->free_sgpgs < wqe->nsgpg)
                        nsgpg = mod->free_sgpgs;
                else
                        nsgpg = wqe->nsgpg;
                bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
                wqe->nsgpg -= nsgpg;
                /* fully satisfied waiters are dequeued and called back */
                if (wqe->nsgpg == 0) {
                        list_del(&wqe->qe);
                        wqe->cbfn(wqe->cbarg);
                }
        } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
5262
/*
 * Queue a wait for nsgpg SG pages.  Any currently-free pages are handed
 * to this waiter immediately; the remainder is delivered later by
 * bfa_sgpg_mfree() via wqe->cbfn.
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
        struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

        WARN_ON(nsgpg <= 0);
        /* caller must only wait when the pool cannot satisfy the request */
        WARN_ON(nsgpg <= mod->free_sgpgs);

        wqe->nsgpg_total = wqe->nsgpg = nsgpg;

        /*
         * allocate any left to this one first
         */
        if (mod->free_sgpgs) {
                /*
                 * no one else is waiting for SGPG
                 */
                WARN_ON(!list_empty(&mod->sgpg_wait_q));
                list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
                wqe->nsgpg -= mod->free_sgpgs;
                mod->free_sgpgs = 0;
        }

        list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
5288
/*
 * Cancel a pending SG-page wait and give back any pages that were
 * already handed to the waiter.
 */
void
bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
{
        struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

        WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
        list_del(&wqe->qe);

        /* total != remaining means a partial allocation was delivered */
        if (wqe->nsgpg_total != wqe->nsgpg)
                bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
                                   wqe->nsgpg_total - wqe->nsgpg);
}
5301
5302void
5303bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5304                   void *cbarg)
5305{
5306        INIT_LIST_HEAD(&wqe->sgpg_q);
5307        wqe->cbfn = cbfn;
5308        wqe->cbarg = cbarg;
5309}
5310
5311/*
5312 *  UF related functions
5313 */
5314/*
5315 *****************************************************************************
5316 * Internal functions
5317 *****************************************************************************
5318 */
5319static void
5320__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5321{
5322        struct bfa_uf_s   *uf = cbarg;
5323        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5324
5325        if (complete)
5326                ufm->ufrecv(ufm->cbarg, uf);
5327}
5328
5329static void
5330claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
5331{
5332        struct bfi_uf_buf_post_s *uf_bp_msg;
5333        u16 i;
5334        u16 buf_len;
5335
5336        ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
5337        uf_bp_msg = ufm->uf_buf_posts;
5338
5339        for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5340             i++, uf_bp_msg++) {
5341                memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5342
5343                uf_bp_msg->buf_tag = i;
5344                buf_len = sizeof(struct bfa_uf_buf_s);
5345                uf_bp_msg->buf_len = cpu_to_be16(buf_len);
5346                bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5347                            bfa_fn_lpu(ufm->bfa));
5348                bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
5349        }
5350
5351        /*
5352         * advance pointer beyond consumed memory
5353         */
5354        bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
5355}
5356
static void
claim_ufs(struct bfa_uf_mod_s *ufm)
{
        u16 i;
        struct bfa_uf_s   *uf;

        /*
         * Claim block of memory for UF list
         */
        ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);

        /*
         * Initialize UFs and queue it in UF free queue
         */
        for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
                memset(uf, 0, sizeof(struct bfa_uf_s));
                uf->bfa = ufm->bfa;
                uf->uf_tag = i;
                uf->pb_len = BFA_PER_UF_DMA_SZ;
                /* bind the UF to its slice of the claimed DMA memory */
                uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
                uf->buf_pa = ufm_pbs_pa(ufm, i);
                list_add_tail(&uf->qe, &ufm->uf_free_q);
        }

        /*
         * advance memory pointer
         */
        bfa_mem_kva_curp(ufm) = (u8 *) uf;
}
5386
/*
 * Claim all UF-module KVA.  Order matters: each claim_* advances the
 * module's KVA cursor past the region it consumed.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
        claim_ufs(ufm);
        claim_uf_post_msgs(ufm);
}
5393
/*
 * Report DMA and KVA memory required by the UF module: one DMA buffer
 * per UF plus a host descriptor and a pre-built buf-post message each.
 */
static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
                struct bfa_s *bfa)
{
        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
        struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
        u32     num_ufs = cfg->fwcfg.num_uf_bufs;
        struct bfa_mem_dma_s *seg_ptr;
        u16     nsegs, idx, per_seg_uf = 0;

        nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
        per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);

        /* spread the UF buffers over per-segment DMA allocations */
        bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
                if (num_ufs >= per_seg_uf) {
                        num_ufs -= per_seg_uf;
                        bfa_mem_dma_setup(minfo, seg_ptr,
                                per_seg_uf * BFA_PER_UF_DMA_SZ);
                } else
                        bfa_mem_dma_setup(minfo, seg_ptr,
                                num_ufs * BFA_PER_UF_DMA_SZ);
        }

        /* kva memory */
        bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
                (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
}
5421
/*
 * Attach time initialization of the UF module: record the configured
 * buffer count, init the bookkeeping queues and claim the KVA memory
 * reserved by bfa_uf_meminfo().
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_pcidev_s *pcidev)
{
        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

        ufm->bfa = bfa;
        ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
        INIT_LIST_HEAD(&ufm->uf_free_q);        /* UFs available to post */
        INIT_LIST_HEAD(&ufm->uf_posted_q);      /* UFs handed to firmware */
        INIT_LIST_HEAD(&ufm->uf_unused_q);      /* UFs beyond fw capacity */

        uf_mem_claim(ufm);
}
5436
/*
 * Detach is a no-op for the UF module: no per-module cleanup is
 * required here.
 */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
5441
/*
 * Dequeue a UF from the free queue; uf is NULL when the queue is empty
 * (see the NULL check in bfa_uf_post_all()).
 */
static struct bfa_uf_s *
bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
{
        struct bfa_uf_s   *uf;

        bfa_q_deq(&uf_mod->uf_free_q, &uf);
        return uf;
}
5450
/*
 * Return a UF to the tail of the free queue.
 */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
        list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
5456
/*
 * Post a single UF buffer to firmware using the buf-post message that
 * was pre-built for its tag at claim time.  On success the UF moves to
 * the posted queue; returns BFA_STATUS_FAILED when the request queue
 * is full.
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
        struct bfi_uf_buf_post_s *uf_post_msg;

        uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
        if (!uf_post_msg)
                return BFA_STATUS_FAILED;

        /* Copy the pre-built message for this tag into the queue slot */
        memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
                      sizeof(struct bfi_uf_buf_post_s));
        bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);

        bfa_trc(ufm->bfa, uf->uf_tag);

        list_add_tail(&uf->qe, &ufm->uf_posted_q);
        return BFA_STATUS_OK;
}
5475
5476static void
5477bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5478{
5479        struct bfa_uf_s   *uf;
5480
5481        while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5482                if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5483                        break;
5484        }
5485}
5486
5487static void
5488uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5489{
5490        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5491        u16 uf_tag = m->buf_tag;
5492        struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
5493        struct bfa_uf_buf_s *uf_buf;
5494        uint8_t *buf;
5495        struct fchs_s *fchs;
5496
5497        uf_buf = (struct bfa_uf_buf_s *)
5498                        bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
5499        buf = &uf_buf->d[0];
5500
5501        m->frm_len = be16_to_cpu(m->frm_len);
5502        m->xfr_len = be16_to_cpu(m->xfr_len);
5503
5504        fchs = (struct fchs_s *)uf_buf;
5505
5506        list_del(&uf->qe);      /* dequeue from posted queue */
5507
5508        uf->data_ptr = buf;
5509        uf->data_len = m->xfr_len;
5510
5511        WARN_ON(uf->data_len < sizeof(struct fchs_s));
5512
5513        if (uf->data_len == sizeof(struct fchs_s)) {
5514                bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5515                               uf->data_len, (struct fchs_s *)buf);
5516        } else {
5517                u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5518                bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5519                                      BFA_PL_EID_RX, uf->data_len,
5520                                      (struct fchs_s *)buf, pld_w0);
5521        }
5522
5523        if (bfa->fcs)
5524                __bfa_cb_uf_recv(uf, BFA_TRUE);
5525        else
5526                bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5527}
5528
/*
 * Stop is a no-op for the UF module.
 */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
5533
5534static void
5535bfa_uf_iocdisable(struct bfa_s *bfa)
5536{
5537        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5538        struct bfa_uf_s *uf;
5539        struct list_head *qe, *qen;
5540
5541        /* Enqueue unused uf resources to free_q */
5542        list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
5543
5544        list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5545                uf = (struct bfa_uf_s *) qe;
5546                list_del(&uf->qe);
5547                bfa_uf_put(ufm, uf);
5548        }
5549}
5550
/*
 * Start: post all free UF buffers to firmware so it can receive
 * unsolicited frames.
 */
static void
bfa_uf_start(struct bfa_s *bfa)
{
        bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5556
5557/*
5558 * Register handler for all unsolicted receive frames.
5559 *
5560 * @param[in]   bfa             BFA instance
5561 * @param[in]   ufrecv  receive handler function
5562 * @param[in]   cbarg   receive handler arg
5563 */
5564void
5565bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5566{
5567        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5568
5569        ufm->ufrecv = ufrecv;
5570        ufm->cbarg = cbarg;
5571}
5572
5573/*
5574 *      Free an unsolicited frame back to BFA.
5575 *
5576 * @param[in]           uf              unsolicited frame to be freed
5577 *
5578 * @return None
5579 */
5580void
5581bfa_uf_free(struct bfa_uf_s *uf)
5582{
5583        bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5584        bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5585}
5586
5587
5588
5589/*
5590 *  uf_pub BFA uf module public functions
5591 */
5592void
5593bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5594{
5595        bfa_trc(bfa, msg->mhdr.msg_id);
5596
5597        switch (msg->mhdr.msg_id) {
5598        case BFI_UF_I2H_FRM_RCVD:
5599                uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
5600                break;
5601
5602        default:
5603                bfa_trc(bfa, msg->mhdr.msg_id);
5604                WARN_ON(1);
5605        }
5606}
5607
5608void
5609bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5610{
5611        struct bfa_uf_mod_s     *mod = BFA_UF_MOD(bfa);
5612        struct list_head        *qe;
5613        int     i;
5614
5615        for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
5616                bfa_q_deq_tail(&mod->uf_free_q, &qe);
5617                list_add_tail(qe, &mod->uf_unused_q);
5618        }
5619}
5620
5621/*
5622 *      Dport forward declaration
5623 */
5624
5625/*
5626 * BFA DPORT state machine events
5627 */
5628enum bfa_dport_sm_event {
5629        BFA_DPORT_SM_ENABLE     = 1,    /* dport enable event         */
5630        BFA_DPORT_SM_DISABLE    = 2,    /* dport disable event        */
5631        BFA_DPORT_SM_FWRSP      = 3,    /* fw enable/disable rsp      */
5632        BFA_DPORT_SM_QRESUME    = 4,    /* CQ space available         */
5633        BFA_DPORT_SM_HWFAIL     = 5,    /* IOC h/w failure            */
5634};
5635
5636static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
5637                                  enum bfa_dport_sm_event event);
5638static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
5639                                  enum bfa_dport_sm_event event);
5640static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
5641                                  enum bfa_dport_sm_event event);
5642static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
5643                                 enum bfa_dport_sm_event event);
5644static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
5645                                 enum bfa_dport_sm_event event);
5646static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
5647                                   enum bfa_dport_sm_event event);
5648static void bfa_dport_qresume(void *cbarg);
5649static void bfa_dport_req_comp(struct bfa_dport_s *dport,
5650                               bfi_diag_dport_rsp_t *msg);
5651
5652/*
5653 *      BFA fcdiag module
5654 */
5655#define BFA_DIAG_QTEST_TOV      1000    /* msec */
5656
5657/*
5658 *      Set port status to busy
5659 */
5660static void
5661bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5662{
5663        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5664
5665        if (fcdiag->lb.lock)
5666                fcport->diag_busy = BFA_TRUE;
5667        else
5668                fcport->diag_busy = BFA_FALSE;
5669}
5670
/*
 * No module-private memory: the common DIAG attach (bfa_diag_attach())
 * does all memory claim for this module.
 */
static void
bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
                struct bfa_s *bfa)
{
}
5676
/*
 * Attach time initialization: wire up the fcdiag module and put the
 * d-port sub-module into its disabled state with a queue-resume
 * callback armed for request-queue waits.
 */
static void
bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
        struct bfa_dport_s  *dport = &fcdiag->dport;

        fcdiag->bfa             = bfa;
        fcdiag->trcmod  = bfa->trcmod;
        /* The common DIAG attach bfa_diag_attach() will do all memory claim */
        dport->bfa = bfa;
        bfa_sm_set_state(dport, bfa_dport_sm_disabled);
        bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
        dport->cbfn = NULL;
        dport->cbarg = NULL;
}
5693
/*
 * IOC failure notification: fail any loopback test in progress and
 * inform the d-port state machine of the hardware failure.
 */
static void
bfa_fcdiag_iocdisable(struct bfa_s *bfa)
{
        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
        struct bfa_dport_s *dport = &fcdiag->dport;

        bfa_trc(fcdiag, fcdiag->lb.lock);
        if (fcdiag->lb.lock) {
                /* Complete the caller with IOC_FAILURE, drop the lock */
                fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
                fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
                fcdiag->lb.lock = 0;
                bfa_fcdiag_set_busy_status(fcdiag);
        }

        bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
}
5710
/*
 * Detach is a no-op for the fcdiag module.
 */
static void
bfa_fcdiag_detach(struct bfa_s *bfa)
{
}
5715
/*
 * Start is a no-op for the fcdiag module.
 */
static void
bfa_fcdiag_start(struct bfa_s *bfa)
{
}
5720
/*
 * Stop is a no-op for the fcdiag module.
 */
static void
bfa_fcdiag_stop(struct bfa_s *bfa)
{
}
5725
/*
 * Queue-test timer expired before the firmware response arrived:
 * report BFA_STATUS_ETIMER to the caller and release the test lock.
 */
static void
bfa_fcdiag_queuetest_timeout(void *cbarg)
{
        struct bfa_fcdiag_s       *fcdiag = cbarg;
        struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;

        bfa_trc(fcdiag, fcdiag->qtest.all);
        bfa_trc(fcdiag, fcdiag->qtest.count);

        fcdiag->qtest.timer_active = 0;

        res->status = BFA_STATUS_ETIMER;
        /* count = number of iterations that did complete */
        res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
        if (fcdiag->qtest.all)
                res->queue  = fcdiag->qtest.all;

        bfa_trc(fcdiag, BFA_STATUS_ETIMER);
        fcdiag->qtest.status = BFA_STATUS_ETIMER;
        fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
        fcdiag->qtest.lock = 0;
}
5747
5748static bfa_status_t
5749bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
5750{
5751        u32     i;
5752        struct bfi_diag_qtest_req_s *req;
5753
5754        req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
5755        if (!req)
5756                return BFA_STATUS_DEVBUSY;
5757
5758        /* build host command */
5759        bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
5760                bfa_fn_lpu(fcdiag->bfa));
5761
5762        for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
5763                req->data[i] = QTEST_PAT_DEFAULT;
5764
5765        bfa_trc(fcdiag, fcdiag->qtest.queue);
5766        /* ring door bell */
5767        bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
5768        return BFA_STATUS_OK;
5769}
5770
/*
 * Firmware response to one queue-test request: verify the echoed
 * (complemented) pattern, then either send the next iteration / next
 * queue, or finish the test, stop the timer and complete the caller.
 */
static void
bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
                        bfi_diag_qtest_rsp_t *rsp)
{
        struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
        bfa_status_t status = BFA_STATUS_OK;
        int i;

        /* Check timer, should still be active   */
        if (!fcdiag->qtest.timer_active) {
                bfa_trc(fcdiag, fcdiag->qtest.timer_active);
                return;
        }

        /* update count */
        fcdiag->qtest.count--;

        /* Check result: firmware echoes the complemented test pattern */
        for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
                if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
                        res->status = BFA_STATUS_DATACORRUPTED;
                        break;
                }
        }

        if (res->status == BFA_STATUS_OK) {
                if (fcdiag->qtest.count > 0) {
                        /* More iterations to run on this queue */
                        status = bfa_fcdiag_queuetest_send(fcdiag);
                        if (status == BFA_STATUS_OK)
                                return;
                        else
                                res->status = status;
                } else if (fcdiag->qtest.all > 0 &&
                        fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
                        /* Testing all queues: advance to the next one */
                        fcdiag->qtest.count = QTEST_CNT_DEFAULT;
                        fcdiag->qtest.queue++;
                        status = bfa_fcdiag_queuetest_send(fcdiag);
                        if (status == BFA_STATUS_OK)
                                return;
                        else
                                res->status = status;
                }
        }

        /* Stop timer when we comp all queue */
        if (fcdiag->qtest.timer_active) {
                bfa_timer_stop(&fcdiag->qtest.timer);
                fcdiag->qtest.timer_active = 0;
        }
        res->queue = fcdiag->qtest.queue;
        res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
        bfa_trc(fcdiag, res->count);
        bfa_trc(fcdiag, res->status);
        fcdiag->qtest.status = res->status;
        fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
        fcdiag->qtest.lock = 0;
}
5828
/*
 * Firmware loopback response: copy the big-endian frame counters into
 * the caller's result structure, complete the callback and release
 * the loopback lock.
 */
static void
bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
                        struct bfi_diag_lb_rsp_s *rsp)
{
        struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;

        res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
        res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
        res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
        res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
        res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
        res->status     = rsp->res.status;
        fcdiag->lb.status = rsp->res.status;
        bfa_trc(fcdiag, fcdiag->lb.status);
        fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
        fcdiag->lb.lock = 0;
        bfa_fcdiag_set_busy_status(fcdiag);
}
5847
/*
 * Build and queue a loopback request to firmware.  Returns
 * BFA_STATUS_DEVBUSY when the DIAG request queue is full.
 */
static bfa_status_t
bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
                        struct bfa_diag_loopback_s *loopback)
{
        struct bfi_diag_lb_req_s *lb_req;

        lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
        if (!lb_req)
                return BFA_STATUS_DEVBUSY;

        /* build host command */
        bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
                bfa_fn_lpu(fcdiag->bfa));

        lb_req->lb_mode = loopback->lb_mode;
        lb_req->speed = loopback->speed;
        lb_req->loopcnt = loopback->loopcnt;
        lb_req->pattern = loopback->pattern;

        /* ring door bell */
        bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);

        bfa_trc(fcdiag, loopback->lb_mode);
        bfa_trc(fcdiag, loopback->speed);
        bfa_trc(fcdiag, loopback->loopcnt);
        bfa_trc(fcdiag, loopback->pattern);
        return BFA_STATUS_OK;
}
5876
5877/*
5878 *      cpe/rme intr handler
5879 */
5880void
5881bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5882{
5883        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5884
5885        switch (msg->mhdr.msg_id) {
5886        case BFI_DIAG_I2H_LOOPBACK:
5887                bfa_fcdiag_loopback_comp(fcdiag,
5888                                (struct bfi_diag_lb_rsp_s *) msg);
5889                break;
5890        case BFI_DIAG_I2H_QTEST:
5891                bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
5892                break;
5893        case BFI_DIAG_I2H_DPORT:
5894                bfa_dport_req_comp(&fcdiag->dport, (bfi_diag_dport_rsp_t *)msg);
5895                break;
5896        default:
5897                bfa_trc(fcdiag, msg->mhdr.msg_id);
5898                WARN_ON(1);
5899        }
5900}
5901
5902/*
5903 *      Loopback test
5904 *
5905 *   @param[in] *bfa            - bfa data struct
5906 *   @param[in] opmode          - port operation mode
5907 *   @param[in] speed           - port speed
5908 *   @param[in] lpcnt           - loop count
5909 *   @param[in] pat                     - pattern to build packet
5910 *   @param[in] *result         - pt to bfa_diag_loopback_result_t data struct
5911 *   @param[in] cbfn            - callback function
5912 *   @param[in] cbarg           - callback functioin arg
5913 *
5914 *   @param[out]
5915 */
5916bfa_status_t
5917bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5918                enum bfa_port_speed speed, u32 lpcnt, u32 pat,
5919                struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
5920                void *cbarg)
5921{
5922        struct  bfa_diag_loopback_s loopback;
5923        struct bfa_port_attr_s attr;
5924        bfa_status_t status;
5925        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5926
5927        if (!bfa_iocfc_is_operational(bfa))
5928                return BFA_STATUS_IOC_NON_OP;
5929
5930        /* if port is PBC disabled, return error */
5931        if (bfa_fcport_is_pbcdisabled(bfa)) {
5932                bfa_trc(fcdiag, BFA_STATUS_PBC);
5933                return BFA_STATUS_PBC;
5934        }
5935
5936        if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
5937                bfa_trc(fcdiag, opmode);
5938                return BFA_STATUS_PORT_NOT_DISABLED;
5939        }
5940
5941        /*
5942         * Check if input speed is supported by the port mode
5943         */
5944        if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5945                if (!(speed == BFA_PORT_SPEED_1GBPS ||
5946                      speed == BFA_PORT_SPEED_2GBPS ||
5947                      speed == BFA_PORT_SPEED_4GBPS ||
5948                      speed == BFA_PORT_SPEED_8GBPS ||
5949                      speed == BFA_PORT_SPEED_16GBPS ||
5950                      speed == BFA_PORT_SPEED_AUTO)) {
5951                        bfa_trc(fcdiag, speed);
5952                        return BFA_STATUS_UNSUPP_SPEED;
5953                }
5954                bfa_fcport_get_attr(bfa, &attr);
5955                bfa_trc(fcdiag, attr.speed_supported);
5956                if (speed > attr.speed_supported)
5957                        return BFA_STATUS_UNSUPP_SPEED;
5958        } else {
5959                if (speed != BFA_PORT_SPEED_10GBPS) {
5960                        bfa_trc(fcdiag, speed);
5961                        return BFA_STATUS_UNSUPP_SPEED;
5962                }
5963        }
5964
5965        /*
5966         * For CT2, 1G is not supported
5967         */
5968        if ((speed == BFA_PORT_SPEED_1GBPS) &&
5969            (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
5970                bfa_trc(fcdiag, speed);
5971                return BFA_STATUS_UNSUPP_SPEED;
5972        }
5973
5974        /* For Mezz card, port speed entered needs to be checked */
5975        if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
5976                if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5977                        if (!(speed == BFA_PORT_SPEED_1GBPS ||
5978                              speed == BFA_PORT_SPEED_2GBPS ||
5979                              speed == BFA_PORT_SPEED_4GBPS ||
5980                              speed == BFA_PORT_SPEED_8GBPS ||
5981                              speed == BFA_PORT_SPEED_16GBPS ||
5982                              speed == BFA_PORT_SPEED_AUTO))
5983                                return BFA_STATUS_UNSUPP_SPEED;
5984                } else {
5985                        if (speed != BFA_PORT_SPEED_10GBPS)
5986                                return BFA_STATUS_UNSUPP_SPEED;
5987                }
5988        }
5989
5990        /* check to see if there is another destructive diag cmd running */
5991        if (fcdiag->lb.lock) {
5992                bfa_trc(fcdiag, fcdiag->lb.lock);
5993                return BFA_STATUS_DEVBUSY;
5994        }
5995
5996        fcdiag->lb.lock = 1;
5997        loopback.lb_mode = opmode;
5998        loopback.speed = speed;
5999        loopback.loopcnt = lpcnt;
6000        loopback.pattern = pat;
6001        fcdiag->lb.result = result;
6002        fcdiag->lb.cbfn = cbfn;
6003        fcdiag->lb.cbarg = cbarg;
6004        memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
6005        bfa_fcdiag_set_busy_status(fcdiag);
6006
6007        /* Send msg to fw */
6008        status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
6009        return status;
6010}
6011
6012/*
6013 *      DIAG queue test command
6014 *
6015 *   @param[in] *bfa            - bfa data struct
6016 *   @param[in] force           - 1: don't do ioc op checking
6017 *   @param[in] queue           - queue no. to test
6018 *   @param[in] *result         - pt to bfa_diag_qtest_result_t data struct
6019 *   @param[in] cbfn            - callback function
6020 *   @param[in] *cbarg          - callback functioin arg
6021 *
6022 *   @param[out]
6023 */
6024bfa_status_t
6025bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
6026                struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
6027                void *cbarg)
6028{
6029        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6030        bfa_status_t status;
6031        bfa_trc(fcdiag, force);
6032        bfa_trc(fcdiag, queue);
6033
6034        if (!force && !bfa_iocfc_is_operational(bfa))
6035                return BFA_STATUS_IOC_NON_OP;
6036
6037        /* check to see if there is another destructive diag cmd running */
6038        if (fcdiag->qtest.lock) {
6039                bfa_trc(fcdiag, fcdiag->qtest.lock);
6040                return BFA_STATUS_DEVBUSY;
6041        }
6042
6043        /* Initialization */
6044        fcdiag->qtest.lock = 1;
6045        fcdiag->qtest.cbfn = cbfn;
6046        fcdiag->qtest.cbarg = cbarg;
6047        fcdiag->qtest.result = result;
6048        fcdiag->qtest.count = QTEST_CNT_DEFAULT;
6049
6050        /* Init test results */
6051        fcdiag->qtest.result->status = BFA_STATUS_OK;
6052        fcdiag->qtest.result->count  = 0;
6053
6054        /* send */
6055        if (queue < BFI_IOC_MAX_CQS) {
6056                fcdiag->qtest.result->queue  = (u8)queue;
6057                fcdiag->qtest.queue = (u8)queue;
6058                fcdiag->qtest.all   = 0;
6059        } else {
6060                fcdiag->qtest.result->queue  = 0;
6061                fcdiag->qtest.queue = 0;
6062                fcdiag->qtest.all   = 1;
6063        }
6064        status = bfa_fcdiag_queuetest_send(fcdiag);
6065
6066        /* Start a timer */
6067        if (status == BFA_STATUS_OK) {
6068                bfa_timer_start(bfa, &fcdiag->qtest.timer,
6069                                bfa_fcdiag_queuetest_timeout, fcdiag,
6070                                BFA_DIAG_QTEST_TOV);
6071                fcdiag->qtest.timer_active = 1;
6072        }
6073        return status;
6074}
6075
6076/*
6077 * DIAG PLB is running
6078 *
6079 *   @param[in] *bfa    - bfa data struct
6080 *
6081 *   @param[out]
6082 */
6083bfa_status_t
6084bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
6085{
6086        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6087        return fcdiag->lb.lock ?  BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
6088}
6089
6090/*
6091 *      D-port
6092 */
6093static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
6094                                        enum bfi_dport_req req);
6095static void
6096bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
6097{
6098        if (dport->cbfn != NULL) {
6099                dport->cbfn(dport->cbarg, bfa_status);
6100                dport->cbfn = NULL;
6101                dport->cbarg = NULL;
6102        }
6103}
6104
/*
 * Disabled state: d-port is off.  ENABLE starts the firmware enable
 * sequence (or waits for request-queue space); DISABLE and HWFAIL are
 * no-ops here.
 */
static void
bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
        bfa_trc(dport->bfa, event);

        switch (event) {
        case BFA_DPORT_SM_ENABLE:
                bfa_fcport_dportenable(dport->bfa);
                if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
                        bfa_sm_set_state(dport, bfa_dport_sm_enabling);
                else
                        bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
                break;

        case BFA_DPORT_SM_DISABLE:
                /* Already disabled */
                break;

        case BFA_DPORT_SM_HWFAIL:
                /* ignore */
                break;

        default:
                bfa_sm_fault(dport->bfa, event);
        }
}
6131
/*
 * Enabling-qwait state: waiting for request-queue space to send the
 * enable request.  QRESUME retries the send; HWFAIL cancels the wait
 * and fails the caller.
 */
static void
bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
                            enum bfa_dport_sm_event event)
{
        bfa_trc(dport->bfa, event);

        switch (event) {
        case BFA_DPORT_SM_QRESUME:
                bfa_sm_set_state(dport, bfa_dport_sm_enabling);
                bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
                break;

        case BFA_DPORT_SM_HWFAIL:
                bfa_reqq_wcancel(&dport->reqq_wait);
                bfa_sm_set_state(dport, bfa_dport_sm_disabled);
                bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
                break;

        default:
                bfa_sm_fault(dport->bfa, event);
        }
}
6154
/*
 * Enabling state: enable request sent, waiting for the firmware
 * response.  FWRSP completes the transition; HWFAIL fails the caller.
 */
static void
bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
        bfa_trc(dport->bfa, event);

        switch (event) {
        case BFA_DPORT_SM_FWRSP:
                bfa_sm_set_state(dport, bfa_dport_sm_enabled);
                break;

        case BFA_DPORT_SM_HWFAIL:
                bfa_sm_set_state(dport, bfa_dport_sm_disabled);
                bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
                break;

        default:
                bfa_sm_fault(dport->bfa, event);
        }
}
6174
/*
 * Enabled state: d-port test is active.  DISABLE starts the firmware
 * disable sequence (or waits for request-queue space); HWFAIL drops
 * straight back to disabled.
 */
static void
bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
        bfa_trc(dport->bfa, event);

        switch (event) {
        case BFA_DPORT_SM_ENABLE:
                /* Already enabled */
                break;

        case BFA_DPORT_SM_DISABLE:
                bfa_fcport_dportdisable(dport->bfa);
                if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
                        bfa_sm_set_state(dport, bfa_dport_sm_disabling);
                else
                        bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
                break;

        case BFA_DPORT_SM_HWFAIL:
                bfa_sm_set_state(dport, bfa_dport_sm_disabled);
                break;

        default:
                bfa_sm_fault(dport->bfa, event);
        }
}
6201
/*
 * Disabling-qwait state: waiting for request-queue space to send the
 * disable request.  QRESUME retries the send; HWFAIL cancels the wait
 * (completing the caller with OK, as the port is effectively down).
 */
static void
bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
                             enum bfa_dport_sm_event event)
{
        bfa_trc(dport->bfa, event);

        switch (event) {
        case BFA_DPORT_SM_QRESUME:
                bfa_sm_set_state(dport, bfa_dport_sm_disabling);
                bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
                break;

        case BFA_DPORT_SM_HWFAIL:
                bfa_sm_set_state(dport, bfa_dport_sm_disabled);
                bfa_reqq_wcancel(&dport->reqq_wait);
                bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
                break;

        default:
                bfa_sm_fault(dport->bfa, event);
        }
}
6224
/*
 * Disabling state: disable request sent, waiting for the firmware
 * response.  Both FWRSP and HWFAIL end in the disabled state; HWFAIL
 * additionally completes the caller with OK.
 */
static void
bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
        bfa_trc(dport->bfa, event);

        switch (event) {
        case BFA_DPORT_SM_FWRSP:
                bfa_sm_set_state(dport, bfa_dport_sm_disabled);
                break;

        case BFA_DPORT_SM_HWFAIL:
                bfa_sm_set_state(dport, bfa_dport_sm_disabled);
                bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
                break;

        default:
                bfa_sm_fault(dport->bfa, event);
        }
}
6244
6245
6246static bfa_boolean_t
6247bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
6248{
6249        struct bfi_diag_dport_req_s *m;
6250
6251        /*
6252         * Increment message tag before queue check, so that responses to old
6253         * requests are discarded.
6254         */
6255        dport->msgtag++;
6256
6257        /*
6258         * check for room in queue to send request now
6259         */
6260        m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
6261        if (!m) {
6262                bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait);
6263                return BFA_FALSE;
6264        }
6265
6266        bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
6267                    bfa_fn_lpu(dport->bfa));
6268        m->req  = req;
6269        m->msgtag = dport->msgtag;
6270
6271        /*
6272         * queue I/O message to firmware
6273         */
6274        bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
6275
6276        return BFA_TRUE;
6277}
6278
/*
 * Request-queue resume callback: space is available again, let the
 * state machine retry the pending enable/disable request.
 */
static void
bfa_dport_qresume(void *cbarg)
{
        struct bfa_dport_s *dport = cbarg;

        bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
}
6286
/*
 * Firmware response to a d-port enable/disable request: advance the
 * state machine, then complete the caller with the firmware status.
 */
static void
bfa_dport_req_comp(struct bfa_dport_s *dport, bfi_diag_dport_rsp_t *msg)
{
        bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
        bfa_cb_fcdiag_dport(dport, msg->status);
}
6293
6294/*
6295 * Dport enable
6296 *
6297 * @param[in] *bfa            - bfa data struct
6298 */
6299bfa_status_t
6300bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6301{
6302        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6303        struct bfa_dport_s  *dport = &fcdiag->dport;
6304
6305        /*
6306         * Dport is not support in MEZZ card
6307         */
6308        if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
6309                bfa_trc(dport->bfa, BFA_STATUS_PBC);
6310                return BFA_STATUS_CMD_NOTSUPP_MEZZ;
6311        }
6312
6313        /*
6314         * Check to see if IOC is down
6315        */
6316        if (!bfa_iocfc_is_operational(bfa))
6317                return BFA_STATUS_IOC_NON_OP;
6318
6319        /* if port is PBC disabled, return error */
6320        if (bfa_fcport_is_pbcdisabled(bfa)) {
6321                bfa_trc(dport->bfa, BFA_STATUS_PBC);
6322                return BFA_STATUS_PBC;
6323        }
6324
6325        /*
6326         * Check if port mode is FC port
6327         */
6328        if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
6329                bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
6330                return BFA_STATUS_CMD_NOTSUPP_CNA;
6331        }
6332
6333        /*
6334         * Check if port is in LOOP mode
6335         */
6336        if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
6337            (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
6338                bfa_trc(dport->bfa, 0);
6339                return BFA_STATUS_TOPOLOGY_LOOP;
6340        }
6341
6342        /*
6343         * Check if port is TRUNK mode
6344         */
6345        if (bfa_fcport_is_trunk_enabled(bfa)) {
6346                bfa_trc(dport->bfa, 0);
6347                return BFA_STATUS_ERROR_TRUNK_ENABLED;
6348        }
6349
6350        /*
6351         * Check to see if port is disable or in dport state
6352         */
6353        if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6354            (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6355                bfa_trc(dport->bfa, 0);
6356                return BFA_STATUS_PORT_NOT_DISABLED;
6357        }
6358
6359        /*
6360         * Check if dport is busy
6361         */
6362        if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
6363            bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6364            bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
6365            bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) {
6366                return BFA_STATUS_DEVBUSY;
6367        }
6368
6369        /*
6370         * Check if dport is already enabled
6371         */
6372        if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6373                bfa_trc(dport->bfa, 0);
6374                return BFA_STATUS_DPORT_ENABLED;
6375        }
6376
6377        dport->cbfn = cbfn;
6378        dport->cbarg = cbarg;
6379
6380        bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
6381        return BFA_STATUS_OK;
6382}
6383
6384/*
6385 *      Dport disable
6386 *
6387 *      @param[in] *bfa            - bfa data struct
6388 */
6389bfa_status_t
6390bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6391{
6392        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6393        struct bfa_dport_s *dport = &fcdiag->dport;
6394
6395        if (bfa_ioc_is_disabled(&bfa->ioc))
6396                return BFA_STATUS_IOC_DISABLED;
6397
6398        /* if port is PBC disabled, return error */
6399        if (bfa_fcport_is_pbcdisabled(bfa)) {
6400                bfa_trc(dport->bfa, BFA_STATUS_PBC);
6401                return BFA_STATUS_PBC;
6402        }
6403
6404        /*
6405         * Check to see if port is disable or in dport state
6406         */
6407        if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6408            (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6409                bfa_trc(dport->bfa, 0);
6410                return BFA_STATUS_PORT_NOT_DISABLED;
6411        }
6412
6413        /*
6414         * Check if dport is busy
6415         */
6416        if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
6417            bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6418            bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
6419            bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
6420                return BFA_STATUS_DEVBUSY;
6421
6422        /*
6423         * Check if dport is already disabled
6424         */
6425        if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
6426                bfa_trc(dport->bfa, 0);
6427                return BFA_STATUS_DPORT_DISABLED;
6428        }
6429
6430        dport->cbfn = cbfn;
6431        dport->cbarg = cbarg;
6432
6433        bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
6434        return BFA_STATUS_OK;
6435}
6436
6437/*
6438 *      Get D-port state
6439 *
6440 * @param[in] *bfa            - bfa data struct
6441 */
6442
6443bfa_status_t
6444bfa_dport_get_state(struct bfa_s *bfa, enum bfa_dport_state *state)
6445{
6446        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6447        struct bfa_dport_s *dport = &fcdiag->dport;
6448
6449        if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled))
6450                *state = BFA_DPORT_ST_ENABLED;
6451        else if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
6452                 bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait))
6453                *state = BFA_DPORT_ST_ENABLING;
6454        else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled))
6455                *state = BFA_DPORT_ST_DISABLED;
6456        else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
6457                 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
6458                *state = BFA_DPORT_ST_DISABLING;
6459        else {
6460                bfa_trc(dport->bfa, BFA_STATUS_EINVAL);
6461                return BFA_STATUS_EINVAL;
6462        }
6463        return BFA_STATUS_OK;
6464}
6465