/* linux/drivers/scsi/bfa/bfa_svc.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 */
  10
  11#include "bfad_drv.h"
  12#include "bfad_im.h"
  13#include "bfa_plog.h"
  14#include "bfa_cs.h"
  15#include "bfa_modules.h"
  16
  17BFA_TRC_FILE(HAL, FCXP);
  18
  19/*
  20 * LPS related definitions
  21 */
  22#define BFA_LPS_MIN_LPORTS      (1)
  23#define BFA_LPS_MAX_LPORTS      (256)
  24
  25/*
  26 * Maximum Vports supported per physical port or vf.
  27 */
  28#define BFA_LPS_MAX_VPORTS_SUPP_CB  255
  29#define BFA_LPS_MAX_VPORTS_SUPP_CT  190
  30
  31
  32/*
  33 * FC PORT related definitions
  34 */
  35/*
  36 * The port is considered disabled if corresponding physical port or IOC are
  37 * disabled explicitly
  38 */
  39#define BFA_PORT_IS_DISABLED(bfa) \
  40        ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
  41        (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
  42
  43/*
  44 * BFA port state machine events
  45 */
  46enum bfa_fcport_sm_event {
  47        BFA_FCPORT_SM_START     = 1,    /*  start port state machine    */
  48        BFA_FCPORT_SM_STOP      = 2,    /*  stop port state machine     */
  49        BFA_FCPORT_SM_ENABLE    = 3,    /*  enable port         */
  50        BFA_FCPORT_SM_DISABLE   = 4,    /*  disable port state machine */
  51        BFA_FCPORT_SM_FWRSP     = 5,    /*  firmware enable/disable rsp */
  52        BFA_FCPORT_SM_LINKUP    = 6,    /*  firmware linkup event       */
  53        BFA_FCPORT_SM_LINKDOWN  = 7,    /*  firmware linkup down        */
  54        BFA_FCPORT_SM_QRESUME   = 8,    /*  CQ space available  */
  55        BFA_FCPORT_SM_HWFAIL    = 9,    /*  IOC h/w failure             */
  56        BFA_FCPORT_SM_DPORTENABLE = 10, /*  enable dport      */
  57        BFA_FCPORT_SM_DPORTDISABLE = 11,/*  disable dport     */
  58        BFA_FCPORT_SM_FAA_MISCONFIG = 12,       /* FAA misconfiguratin */
  59        BFA_FCPORT_SM_DDPORTENABLE  = 13,       /* enable ddport        */
  60        BFA_FCPORT_SM_DDPORTDISABLE = 14,       /* disable ddport       */
  61};
  62
  63/*
  64 * BFA port link notification state machine events
  65 */
  66
  67enum bfa_fcport_ln_sm_event {
  68        BFA_FCPORT_LN_SM_LINKUP         = 1,    /*  linkup event        */
  69        BFA_FCPORT_LN_SM_LINKDOWN       = 2,    /*  linkdown event      */
  70        BFA_FCPORT_LN_SM_NOTIFICATION   = 3     /*  done notification   */
  71};
  72
  73/*
  74 * RPORT related definitions
  75 */
  76#define bfa_rport_offline_cb(__rp) do {                                 \
  77        if ((__rp)->bfa->fcs)                                           \
  78                bfa_cb_rport_offline((__rp)->rport_drv);      \
  79        else {                                                          \
  80                bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,              \
  81                                __bfa_cb_rport_offline, (__rp));      \
  82        }                                                               \
  83} while (0)
  84
  85#define bfa_rport_online_cb(__rp) do {                                  \
  86        if ((__rp)->bfa->fcs)                                           \
  87                bfa_cb_rport_online((__rp)->rport_drv);      \
  88        else {                                                          \
  89                bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,              \
  90                                  __bfa_cb_rport_online, (__rp));      \
  91                }                                                       \
  92} while (0)
  93
  94/*
  95 * forward declarations FCXP related functions
  96 */
  97static void     __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
  98static void     hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
  99                                struct bfi_fcxp_send_rsp_s *fcxp_rsp);
 100static void     hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
 101                                struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
 102static void     bfa_fcxp_qresume(void *cbarg);
 103static void     bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
 104                                struct bfi_fcxp_send_req_s *send_req);
 105
 106/*
 107 * forward declarations for LPS functions
 108 */
 109static void bfa_lps_login_rsp(struct bfa_s *bfa,
 110                                struct bfi_lps_login_rsp_s *rsp);
 111static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
 112static void bfa_lps_logout_rsp(struct bfa_s *bfa,
 113                                struct bfi_lps_logout_rsp_s *rsp);
 114static void bfa_lps_reqq_resume(void *lps_arg);
 115static void bfa_lps_free(struct bfa_lps_s *lps);
 116static void bfa_lps_send_login(struct bfa_lps_s *lps);
 117static void bfa_lps_send_logout(struct bfa_lps_s *lps);
 118static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
 119static void bfa_lps_login_comp(struct bfa_lps_s *lps);
 120static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
 121static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
 122
 123/*
 124 * forward declaration for LPS state machine
 125 */
 126static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
 127static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
 128static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
 129                                        event);
 130static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
 131static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
 132                                        enum bfa_lps_event event);
 133static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
 134static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
 135                                        event);
 136
 137/*
 138 * forward declaration for FC Port functions
 139 */
 140static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
 141static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
 142static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
 143static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
 144static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
 145static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
 146static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
 147                        enum bfa_port_linkstate event, bfa_boolean_t trunk);
 148static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
 149                                enum bfa_port_linkstate event);
 150static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
 151static void bfa_fcport_stats_get_timeout(void *cbarg);
 152static void bfa_fcport_stats_clr_timeout(void *cbarg);
 153static void bfa_trunk_iocdisable(struct bfa_s *bfa);
 154
 155/*
 156 * forward declaration for FC PORT state machine
 157 */
 158static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
 159                                        enum bfa_fcport_sm_event event);
 160static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
 161                                        enum bfa_fcport_sm_event event);
 162static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
 163                                        enum bfa_fcport_sm_event event);
 164static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
 165                                        enum bfa_fcport_sm_event event);
 166static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
 167                                        enum bfa_fcport_sm_event event);
 168static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
 169                                        enum bfa_fcport_sm_event event);
 170static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
 171                                        enum bfa_fcport_sm_event event);
 172static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
 173                                        enum bfa_fcport_sm_event event);
 174static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
 175                                        enum bfa_fcport_sm_event event);
 176static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
 177                                        enum bfa_fcport_sm_event event);
 178static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
 179                                        enum bfa_fcport_sm_event event);
 180static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
 181                                        enum bfa_fcport_sm_event event);
 182static void     bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
 183                                        enum bfa_fcport_sm_event event);
 184static void     bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
 185                                        enum bfa_fcport_sm_event event);
 186static void     bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
 187                                        enum bfa_fcport_sm_event event);
 188
 189static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
 190                                        enum bfa_fcport_ln_sm_event event);
 191static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
 192                                        enum bfa_fcport_ln_sm_event event);
 193static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
 194                                        enum bfa_fcport_ln_sm_event event);
 195static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
 196                                        enum bfa_fcport_ln_sm_event event);
 197static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
 198                                        enum bfa_fcport_ln_sm_event event);
 199static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
 200                                        enum bfa_fcport_ln_sm_event event);
 201static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
 202                                        enum bfa_fcport_ln_sm_event event);
 203
 204static struct bfa_sm_table_s hal_port_sm_table[] = {
 205        {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
 206        {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
 207        {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
 208        {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
 209        {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
 210        {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
 211        {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
 212        {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
 213        {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
 214        {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
 215        {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
 216        {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
 217        {BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
 218        {BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT},
 219        {BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
 220};
 221
 222
 223/*
 224 * forward declaration for RPORT related functions
 225 */
 226static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
 227static void             bfa_rport_free(struct bfa_rport_s *rport);
 228static bfa_boolean_t    bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
 229static bfa_boolean_t    bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
 230static bfa_boolean_t    bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
 231static void             __bfa_cb_rport_online(void *cbarg,
 232                                                bfa_boolean_t complete);
 233static void             __bfa_cb_rport_offline(void *cbarg,
 234                                                bfa_boolean_t complete);
 235
 236/*
 237 * forward declaration for RPORT state machine
 238 */
 239static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
 240                                        enum bfa_rport_event event);
 241static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
 242                                        enum bfa_rport_event event);
 243static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
 244                                        enum bfa_rport_event event);
 245static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
 246                                        enum bfa_rport_event event);
 247static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
 248                                        enum bfa_rport_event event);
 249static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
 250                                        enum bfa_rport_event event);
 251static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
 252                                        enum bfa_rport_event event);
 253static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
 254                                        enum bfa_rport_event event);
 255static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
 256                                        enum bfa_rport_event event);
 257static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
 258                                        enum bfa_rport_event event);
 259static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
 260                                        enum bfa_rport_event event);
 261static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
 262                                        enum bfa_rport_event event);
 263static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
 264                                        enum bfa_rport_event event);
 265
 266/*
 267 * PLOG related definitions
 268 */
 269static int
 270plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
 271{
 272        if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
 273                (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
 274                return 1;
 275
 276        if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
 277                (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
 278                return 1;
 279
 280        return 0;
 281}
 282
 283static void
 284bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
 285{
 286        u16 tail;
 287        struct bfa_plog_rec_s *pl_recp;
 288
 289        if (plog->plog_enabled == 0)
 290                return;
 291
 292        if (plkd_validate_logrec(pl_rec)) {
 293                WARN_ON(1);
 294                return;
 295        }
 296
 297        tail = plog->tail;
 298
 299        pl_recp = &(plog->plog_recs[tail]);
 300
 301        memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
 302
 303        pl_recp->tv = ktime_get_real_seconds();
 304        BFA_PL_LOG_REC_INCR(plog->tail);
 305
 306        if (plog->head == plog->tail)
 307                BFA_PL_LOG_REC_INCR(plog->head);
 308}
 309
 310void
 311bfa_plog_init(struct bfa_plog_s *plog)
 312{
 313        memset((char *)plog, 0, sizeof(struct bfa_plog_s));
 314
 315        memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
 316        plog->head = plog->tail = 0;
 317        plog->plog_enabled = 1;
 318}
 319
 320void
 321bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 322                enum bfa_plog_eid event,
 323                u16 misc, char *log_str)
 324{
 325        struct bfa_plog_rec_s  lp;
 326
 327        if (plog->plog_enabled) {
 328                memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 329                lp.mid = mid;
 330                lp.eid = event;
 331                lp.log_type = BFA_PL_LOG_TYPE_STRING;
 332                lp.misc = misc;
 333                strlcpy(lp.log_entry.string_log, log_str,
 334                        BFA_PL_STRING_LOG_SZ);
 335                lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
 336                bfa_plog_add(plog, &lp);
 337        }
 338}
 339
 340void
 341bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 342                enum bfa_plog_eid event,
 343                u16 misc, u32 *intarr, u32 num_ints)
 344{
 345        struct bfa_plog_rec_s  lp;
 346        u32 i;
 347
 348        if (num_ints > BFA_PL_INT_LOG_SZ)
 349                num_ints = BFA_PL_INT_LOG_SZ;
 350
 351        if (plog->plog_enabled) {
 352                memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 353                lp.mid = mid;
 354                lp.eid = event;
 355                lp.log_type = BFA_PL_LOG_TYPE_INT;
 356                lp.misc = misc;
 357
 358                for (i = 0; i < num_ints; i++)
 359                        lp.log_entry.int_log[i] = intarr[i];
 360
 361                lp.log_num_ints = (u8) num_ints;
 362
 363                bfa_plog_add(plog, &lp);
 364        }
 365}
 366
 367void
 368bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 369                        enum bfa_plog_eid event,
 370                        u16 misc, struct fchs_s *fchdr)
 371{
 372        struct bfa_plog_rec_s  lp;
 373        u32     *tmp_int = (u32 *) fchdr;
 374        u32     ints[BFA_PL_INT_LOG_SZ];
 375
 376        if (plog->plog_enabled) {
 377                memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 378
 379                ints[0] = tmp_int[0];
 380                ints[1] = tmp_int[1];
 381                ints[2] = tmp_int[4];
 382
 383                bfa_plog_intarr(plog, mid, event, misc, ints, 3);
 384        }
 385}
 386
 387void
 388bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 389                      enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
 390                      u32 pld_w0)
 391{
 392        struct bfa_plog_rec_s  lp;
 393        u32     *tmp_int = (u32 *) fchdr;
 394        u32     ints[BFA_PL_INT_LOG_SZ];
 395
 396        if (plog->plog_enabled) {
 397                memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 398
 399                ints[0] = tmp_int[0];
 400                ints[1] = tmp_int[1];
 401                ints[2] = tmp_int[4];
 402                ints[3] = pld_w0;
 403
 404                bfa_plog_intarr(plog, mid, event, misc, ints, 4);
 405        }
 406}
 407
 408
 409/*
 410 *  fcxp_pvt BFA FCXP private functions
 411 */
 412
 413static void
 414claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
 415{
 416        u16     i;
 417        struct bfa_fcxp_s *fcxp;
 418
 419        fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
 420        memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
 421
 422        INIT_LIST_HEAD(&mod->fcxp_req_free_q);
 423        INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
 424        INIT_LIST_HEAD(&mod->fcxp_active_q);
 425        INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
 426        INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
 427
 428        mod->fcxp_list = fcxp;
 429
 430        for (i = 0; i < mod->num_fcxps; i++) {
 431                fcxp->fcxp_mod = mod;
 432                fcxp->fcxp_tag = i;
 433
 434                if (i < (mod->num_fcxps / 2)) {
 435                        list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
 436                        fcxp->req_rsp = BFA_TRUE;
 437                } else {
 438                        list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
 439                        fcxp->req_rsp = BFA_FALSE;
 440                }
 441
 442                bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
 443                fcxp->reqq_waiting = BFA_FALSE;
 444
 445                fcxp = fcxp + 1;
 446        }
 447
 448        bfa_mem_kva_curp(mod) = (void *)fcxp;
 449}
 450
 451void
 452bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
 453                struct bfa_s *bfa)
 454{
 455        struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
 456        struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
 457        struct bfa_mem_dma_s *seg_ptr;
 458        u16     nsegs, idx, per_seg_fcxp;
 459        u16     num_fcxps = cfg->fwcfg.num_fcxp_reqs;
 460        u32     per_fcxp_sz;
 461
 462        if (num_fcxps == 0)
 463                return;
 464
 465        if (cfg->drvcfg.min_cfg)
 466                per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
 467        else
 468                per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;
 469
 470        /* dma memory */
 471        nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
 472        per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);
 473
 474        bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
 475                if (num_fcxps >= per_seg_fcxp) {
 476                        num_fcxps -= per_seg_fcxp;
 477                        bfa_mem_dma_setup(minfo, seg_ptr,
 478                                per_seg_fcxp * per_fcxp_sz);
 479                } else
 480                        bfa_mem_dma_setup(minfo, seg_ptr,
 481                                num_fcxps * per_fcxp_sz);
 482        }
 483
 484        /* kva memory */
 485        bfa_mem_kva_setup(minfo, fcxp_kva,
 486                cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
 487}
 488
 489void
 490bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 491                struct bfa_pcidev_s *pcidev)
 492{
 493        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
 494
 495        mod->bfa = bfa;
 496        mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
 497
 498        /*
 499         * Initialize FCXP request and response payload sizes.
 500         */
 501        mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
 502        if (!cfg->drvcfg.min_cfg)
 503                mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
 504
 505        INIT_LIST_HEAD(&mod->req_wait_q);
 506        INIT_LIST_HEAD(&mod->rsp_wait_q);
 507
 508        claim_fcxps_mem(mod);
 509}
 510
 511void
 512bfa_fcxp_iocdisable(struct bfa_s *bfa)
 513{
 514        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
 515        struct bfa_fcxp_s *fcxp;
 516        struct list_head              *qe, *qen;
 517
 518        /* Enqueue unused fcxp resources to free_q */
 519        list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
 520        list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);
 521
 522        list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
 523                fcxp = (struct bfa_fcxp_s *) qe;
 524                if (fcxp->caller == NULL) {
 525                        fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
 526                                        BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
 527                        bfa_fcxp_free(fcxp);
 528                } else {
 529                        fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
 530                        bfa_cb_queue(bfa, &fcxp->hcb_qe,
 531                                     __bfa_fcxp_send_cbfn, fcxp);
 532                }
 533        }
 534}
 535
 536static struct bfa_fcxp_s *
 537bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
 538{
 539        struct bfa_fcxp_s *fcxp;
 540
 541        if (req)
 542                bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
 543        else
 544                bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
 545
 546        if (fcxp)
 547                list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
 548
 549        return fcxp;
 550}
 551
 552static void
 553bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
 554               struct bfa_s *bfa,
 555               u8 *use_ibuf,
 556               u32 *nr_sgles,
 557               bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
 558               bfa_fcxp_get_sglen_t *r_sglen_cbfn,
 559               struct list_head *r_sgpg_q,
 560               int n_sgles,
 561               bfa_fcxp_get_sgaddr_t sga_cbfn,
 562               bfa_fcxp_get_sglen_t sglen_cbfn)
 563{
 564
 565        WARN_ON(bfa == NULL);
 566
 567        bfa_trc(bfa, fcxp->fcxp_tag);
 568
 569        if (n_sgles == 0) {
 570                *use_ibuf = 1;
 571        } else {
 572                WARN_ON(*sga_cbfn == NULL);
 573                WARN_ON(*sglen_cbfn == NULL);
 574
 575                *use_ibuf = 0;
 576                *r_sga_cbfn = sga_cbfn;
 577                *r_sglen_cbfn = sglen_cbfn;
 578
 579                *nr_sgles = n_sgles;
 580
 581                /*
 582                 * alloc required sgpgs
 583                 */
 584                if (n_sgles > BFI_SGE_INLINE)
 585                        WARN_ON(1);
 586        }
 587
 588}
 589
 590static void
 591bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
 592               void *caller, struct bfa_s *bfa, int nreq_sgles,
 593               int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
 594               bfa_fcxp_get_sglen_t req_sglen_cbfn,
 595               bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
 596               bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
 597{
 598
 599        WARN_ON(bfa == NULL);
 600
 601        bfa_trc(bfa, fcxp->fcxp_tag);
 602
 603        fcxp->caller = caller;
 604
 605        bfa_fcxp_init_reqrsp(fcxp, bfa,
 606                &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
 607                &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
 608                nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
 609
 610        bfa_fcxp_init_reqrsp(fcxp, bfa,
 611                &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
 612                &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
 613                nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
 614
 615}
 616
 617static void
 618bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
 619{
 620        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
 621        struct bfa_fcxp_wqe_s *wqe;
 622
 623        if (fcxp->req_rsp)
 624                bfa_q_deq(&mod->req_wait_q, &wqe);
 625        else
 626                bfa_q_deq(&mod->rsp_wait_q, &wqe);
 627
 628        if (wqe) {
 629                bfa_trc(mod->bfa, fcxp->fcxp_tag);
 630
 631                bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
 632                        wqe->nrsp_sgles, wqe->req_sga_cbfn,
 633                        wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
 634                        wqe->rsp_sglen_cbfn);
 635
 636                wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
 637                return;
 638        }
 639
 640        WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
 641        list_del(&fcxp->qe);
 642
 643        if (fcxp->req_rsp)
 644                list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
 645        else
 646                list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
 647}
 648
 649static void
 650bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
 651                   bfa_status_t req_status, u32 rsp_len,
 652                   u32 resid_len, struct fchs_s *rsp_fchs)
 653{
 654        /* discarded fcxp completion */
 655}
 656
 657static void
 658__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
 659{
 660        struct bfa_fcxp_s *fcxp = cbarg;
 661
 662        if (complete) {
 663                fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
 664                                fcxp->rsp_status, fcxp->rsp_len,
 665                                fcxp->residue_len, &fcxp->rsp_fchs);
 666        } else {
 667                bfa_fcxp_free(fcxp);
 668        }
 669}
 670
 671static void
 672hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
 673{
 674        struct bfa_fcxp_mod_s   *mod = BFA_FCXP_MOD(bfa);
 675        struct bfa_fcxp_s       *fcxp;
 676        u16             fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);
 677
 678        bfa_trc(bfa, fcxp_tag);
 679
 680        fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
 681
 682        /*
 683         * @todo f/w should not set residue to non-0 when everything
 684         *       is received.
 685         */
 686        if (fcxp_rsp->req_status == BFA_STATUS_OK)
 687                fcxp_rsp->residue_len = 0;
 688        else
 689                fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);
 690
 691        fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
 692
 693        WARN_ON(fcxp->send_cbfn == NULL);
 694
 695        hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
 696
 697        if (fcxp->send_cbfn != NULL) {
 698                bfa_trc(mod->bfa, (NULL == fcxp->caller));
 699                if (fcxp->caller == NULL) {
 700                        fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
 701                                        fcxp_rsp->req_status, fcxp_rsp->rsp_len,
 702                                        fcxp_rsp->residue_len, &fcxp_rsp->fchs);
 703                        /*
 704                         * fcxp automatically freed on return from the callback
 705                         */
 706                        bfa_fcxp_free(fcxp);
 707                } else {
 708                        fcxp->rsp_status = fcxp_rsp->req_status;
 709                        fcxp->rsp_len = fcxp_rsp->rsp_len;
 710                        fcxp->residue_len = fcxp_rsp->residue_len;
 711                        fcxp->rsp_fchs = fcxp_rsp->fchs;
 712
 713                        bfa_cb_queue(bfa, &fcxp->hcb_qe,
 714                                        __bfa_fcxp_send_cbfn, fcxp);
 715                }
 716        } else {
 717                bfa_trc(bfa, (NULL == fcxp->send_cbfn));
 718        }
 719}
 720
 721static void
 722hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
 723                 struct fchs_s *fchs)
 724{
 725        /*
 726         * TODO: TX ox_id
 727         */
 728        if (reqlen > 0) {
 729                if (fcxp->use_ireqbuf) {
 730                        u32     pld_w0 =
 731                                *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
 732
 733                        bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
 734                                        BFA_PL_EID_TX,
 735                                        reqlen + sizeof(struct fchs_s), fchs,
 736                                        pld_w0);
 737                } else {
 738                        bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
 739                                        BFA_PL_EID_TX,
 740                                        reqlen + sizeof(struct fchs_s),
 741                                        fchs);
 742                }
 743        } else {
 744                bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
 745                               reqlen + sizeof(struct fchs_s), fchs);
 746        }
 747}
 748
 749static void
 750hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
 751                 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
 752{
 753        if (fcxp_rsp->rsp_len > 0) {
 754                if (fcxp->use_irspbuf) {
 755                        u32     pld_w0 =
 756                                *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
 757
 758                        bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
 759                                              BFA_PL_EID_RX,
 760                                              (u16) fcxp_rsp->rsp_len,
 761                                              &fcxp_rsp->fchs, pld_w0);
 762                } else {
 763                        bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
 764                                       BFA_PL_EID_RX,
 765                                       (u16) fcxp_rsp->rsp_len,
 766                                       &fcxp_rsp->fchs);
 767                }
 768        } else {
 769                bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
 770                               (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
 771        }
 772}
 773
 774/*
 775 * Handler to resume sending fcxp when space in available in cpe queue.
 776 */
static void
bfa_fcxp_qresume(void *cbarg)
{
        struct bfa_fcxp_s               *fcxp = cbarg;
        struct bfa_s                    *bfa = fcxp->fcxp_mod->bfa;
        struct bfi_fcxp_send_req_s      *send_req;

        fcxp->reqq_waiting = BFA_FALSE;
        /*
         * NOTE(review): return of bfa_reqq_next() is not checked here --
         * presumably non-NULL is guaranteed once the queue-wait callback
         * fires; confirm against the reqq implementation.
         */
        send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
        bfa_fcxp_queue(fcxp, send_req);
}
 788
 789/*
 790 * Queue fcxp send request to foimrware.
 791 */
 792static void
 793bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
 794{
 795        struct bfa_s                    *bfa = fcxp->fcxp_mod->bfa;
 796        struct bfa_fcxp_req_info_s      *reqi = &fcxp->req_info;
 797        struct bfa_fcxp_rsp_info_s      *rspi = &fcxp->rsp_info;
 798        struct bfa_rport_s              *rport = reqi->bfa_rport;
 799
 800        bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
 801                    bfa_fn_lpu(bfa));
 802
 803        send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
 804        if (rport) {
 805                send_req->rport_fw_hndl = rport->fw_handle;
 806                send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
 807                if (send_req->max_frmsz == 0)
 808                        send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
 809        } else {
 810                send_req->rport_fw_hndl = 0;
 811                send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
 812        }
 813
 814        send_req->vf_id = cpu_to_be16(reqi->vf_id);
 815        send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
 816        send_req->class = reqi->class;
 817        send_req->rsp_timeout = rspi->rsp_timeout;
 818        send_req->cts = reqi->cts;
 819        send_req->fchs = reqi->fchs;
 820
 821        send_req->req_len = cpu_to_be32(reqi->req_tot_len);
 822        send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
 823
 824        /*
 825         * setup req sgles
 826         */
 827        if (fcxp->use_ireqbuf == 1) {
 828                bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
 829                                        BFA_FCXP_REQ_PLD_PA(fcxp));
 830        } else {
 831                if (fcxp->nreq_sgles > 0) {
 832                        WARN_ON(fcxp->nreq_sgles != 1);
 833                        bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
 834                                fcxp->req_sga_cbfn(fcxp->caller, 0));
 835                } else {
 836                        WARN_ON(reqi->req_tot_len != 0);
 837                        bfa_alen_set(&send_req->rsp_alen, 0, 0);
 838                }
 839        }
 840
 841        /*
 842         * setup rsp sgles
 843         */
 844        if (fcxp->use_irspbuf == 1) {
 845                WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
 846
 847                bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
 848                                        BFA_FCXP_RSP_PLD_PA(fcxp));
 849        } else {
 850                if (fcxp->nrsp_sgles > 0) {
 851                        WARN_ON(fcxp->nrsp_sgles != 1);
 852                        bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
 853                                fcxp->rsp_sga_cbfn(fcxp->caller, 0));
 854
 855                } else {
 856                        WARN_ON(rspi->rsp_maxlen != 0);
 857                        bfa_alen_set(&send_req->rsp_alen, 0, 0);
 858                }
 859        }
 860
 861        hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
 862
 863        bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
 864
 865        bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
 866        bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
 867}
 868
 869/*
 870 * Allocate an FCXP instance to send a response or to send a request
 871 * that has a response. Request/response buffers are allocated by caller.
 872 *
 873 * @param[in]   bfa             BFA bfa instance
 874 * @param[in]   nreq_sgles      Number of SG elements required for request
 875 *                              buffer. 0, if fcxp internal buffers are used.
 876 *                              Use bfa_fcxp_get_reqbuf() to get the
 877 *                              internal req buffer.
 878 * @param[in]   req_sgles       SG elements describing request buffer. Will be
 879 *                              copied in by BFA and hence can be freed on
 880 *                              return from this function.
 881 * @param[in]   get_req_sga     function ptr to be called to get a request SG
 882 *                              Address (given the sge index).
 883 * @param[in]   get_req_sglen   function ptr to be called to get a request SG
 884 *                              len (given the sge index).
 885 * @param[in]   get_rsp_sga     function ptr to be called to get a response SG
 886 *                              Address (given the sge index).
 887 * @param[in]   get_rsp_sglen   function ptr to be called to get a response SG
 888 *                              len (given the sge index).
 889 * @param[in]   req             Allocated FCXP is used to send req or rsp?
 890 *                              request - BFA_TRUE, response - BFA_FALSE
 891 *
 892 * @return FCXP instance. NULL on failure.
 893 */
 894struct bfa_fcxp_s *
 895bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
 896                int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
 897                bfa_fcxp_get_sglen_t req_sglen_cbfn,
 898                bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
 899                bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
 900{
 901        struct bfa_fcxp_s *fcxp = NULL;
 902
 903        WARN_ON(bfa == NULL);
 904
 905        fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
 906        if (fcxp == NULL)
 907                return NULL;
 908
 909        bfa_trc(bfa, fcxp->fcxp_tag);
 910
 911        bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
 912                        req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
 913
 914        return fcxp;
 915}
 916
 917/*
 918 * Get the internal request buffer pointer
 919 *
 920 * @param[in]   fcxp    BFA fcxp pointer
 921 *
 922 * @return              pointer to the internal request buffer
 923 */
 924void *
 925bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
 926{
 927        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
 928        void    *reqbuf;
 929
 930        WARN_ON(fcxp->use_ireqbuf != 1);
 931        reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
 932                                mod->req_pld_sz + mod->rsp_pld_sz);
 933        return reqbuf;
 934}
 935
 936u32
 937bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
 938{
 939        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
 940
 941        return mod->req_pld_sz;
 942}
 943
 944/*
 945 * Get the internal response buffer pointer
 946 *
 947 * @param[in]   fcxp    BFA fcxp pointer
 948 *
 949 * @return              pointer to the internal request buffer
 950 */
 951void *
 952bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
 953{
 954        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
 955        void    *fcxp_buf;
 956
 957        WARN_ON(fcxp->use_irspbuf != 1);
 958
 959        fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
 960                                mod->req_pld_sz + mod->rsp_pld_sz);
 961
 962        /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
 963        return ((u8 *) fcxp_buf) + mod->req_pld_sz;
 964}
 965
 966/*
 967 * Free the BFA FCXP
 968 *
 969 * @param[in]   fcxp                    BFA fcxp pointer
 970 *
 971 * @return              void
 972 */
 973void
 974bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
 975{
 976        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
 977
 978        WARN_ON(fcxp == NULL);
 979        bfa_trc(mod->bfa, fcxp->fcxp_tag);
 980        bfa_fcxp_put(fcxp);
 981}
 982
 983/*
 984 * Send a FCXP request
 985 *
 986 * @param[in]   fcxp    BFA fcxp pointer
 987 * @param[in]   rport   BFA rport pointer. Could be left NULL for WKA rports
 988 * @param[in]   vf_id   virtual Fabric ID
 989 * @param[in]   lp_tag  lport tag
 990 * @param[in]   cts     use Continuous sequence
 991 * @param[in]   cos     fc Class of Service
 992 * @param[in]   reqlen  request length, does not include FCHS length
 993 * @param[in]   fchs    fc Header Pointer. The header content will be copied
 994 *                      in by BFA.
 995 *
 996 * @param[in]   cbfn    call back function to be called on receiving
 997 *                                                              the response
 * @param[in]   cbarg   arg for cbfn
 * @param[in]   rsp_maxlen
 *                      maximum expected response payload length
 * @param[in]   rsp_timeout
 *                      response timeout
1001 *
1002 * @return              bfa_status_t
1003 */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
              u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
              u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
              void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
        struct bfa_s                    *bfa  = fcxp->fcxp_mod->bfa;
        struct bfa_fcxp_req_info_s      *reqi = &fcxp->req_info;
        struct bfa_fcxp_rsp_info_s      *rspi = &fcxp->rsp_info;
        struct bfi_fcxp_send_req_s      *send_req;

        bfa_trc(bfa, fcxp->fcxp_tag);

        /*
         * setup request/response info
         */
        reqi->bfa_rport = rport;
        reqi->vf_id = vf_id;
        reqi->lp_tag = lp_tag;
        reqi->class = cos;
        rspi->rsp_timeout = rsp_timeout;
        reqi->cts = cts;
        reqi->fchs = *fchs;
        reqi->req_tot_len = reqlen;
        rspi->rsp_maxlen = rsp_maxlen;
        /* substitute a no-op completion so callers may pass cbfn == NULL */
        fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
        fcxp->send_cbarg = cbarg;

        /*
         * If no room in CPE queue, wait for space in request queue
         */
        send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
        if (!send_req) {
                bfa_trc(bfa, fcxp->fcxp_tag);
                /* bfa_fcxp_qresume() resumes the send when space frees up */
                fcxp->reqq_waiting = BFA_TRUE;
                bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
                return;
        }

        bfa_fcxp_queue(fcxp, send_req);
}
1045
1046/*
1047 * Abort a BFA FCXP
1048 *
1049 * @param[in]   fcxp    BFA fcxp pointer
1050 *
1051 * @return              void
1052 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
        bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
        /* abort is not implemented; any caller reaching this is a bug */
        WARN_ON(1);
        return BFA_STATUS_OK;
}
1060
/*
 * Queue a wait entry to be called back (alloc_cbfn) when an fcxp of the
 * requested kind (request vs. response pool) becomes available.
 */
void
bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
               bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
               void *caller, int nreq_sgles,
               int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
               bfa_fcxp_get_sglen_t req_sglen_cbfn,
               bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
               bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
{
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

        /* callers should only wait when the matching free pool is empty */
        if (req)
                WARN_ON(!list_empty(&mod->fcxp_req_free_q));
        else
                WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));

        /*
         * Save all allocation parameters on the wait entry; presumably the
         * free path replays them when an fcxp is returned to the pool --
         * the resume side is outside this chunk, confirm there.
         */
        wqe->alloc_cbfn = alloc_cbfn;
        wqe->alloc_cbarg = alloc_cbarg;
        wqe->caller = caller;
        wqe->bfa = bfa;
        wqe->nreq_sgles = nreq_sgles;
        wqe->nrsp_sgles = nrsp_sgles;
        wqe->req_sga_cbfn = req_sga_cbfn;
        wqe->req_sglen_cbfn = req_sglen_cbfn;
        wqe->rsp_sga_cbfn = rsp_sga_cbfn;
        wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;

        if (req)
                list_add_tail(&wqe->qe, &mod->req_wait_q);
        else
                list_add_tail(&wqe->qe, &mod->rsp_wait_q);
}
1093
1094void
1095bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1096{
1097        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1098
1099        WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) ||
1100                !bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
1101        list_del(&wqe->qe);
1102}
1103
void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
        /*
         * If waiting for room in request queue, cancel reqq wait
         * and free fcxp.
         */
        if (fcxp->reqq_waiting) {
                fcxp->reqq_waiting = BFA_FALSE;
                bfa_reqq_wcancel(&fcxp->reqq_wqe);
                bfa_fcxp_free(fcxp);
                return;
        }

        /*
         * Otherwise the request was already queued to firmware: keep the
         * fcxp alive and swap in the no-op completion so the eventual
         * response is silently dropped.
         */
        fcxp->send_cbfn = bfa_fcxp_null_comp;
}
1120
1121void
1122bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1123{
1124        switch (msg->mhdr.msg_id) {
1125        case BFI_FCXP_I2H_SEND_RSP:
1126                hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1127                break;
1128
1129        default:
1130                bfa_trc(bfa, msg->mhdr.msg_id);
1131                WARN_ON(1);
1132        }
1133}
1134
1135u32
1136bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1137{
1138        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1139
1140        return mod->rsp_pld_sz;
1141}
1142
1143void
1144bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
1145{
1146        struct bfa_fcxp_mod_s   *mod = BFA_FCXP_MOD(bfa);
1147        struct list_head        *qe;
1148        int     i;
1149
1150        for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
1151                if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
1152                        bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
1153                        list_add_tail(qe, &mod->fcxp_req_unused_q);
1154                } else {
1155                        bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
1156                        list_add_tail(qe, &mod->fcxp_rsp_unused_q);
1157                }
1158        }
1159}
1160
1161/*
1162 *  BFA LPS state machine functions
1163 */
1164
1165/*
1166 * Init state -- no login
1167 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
        bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);

        switch (event) {
        case BFA_LPS_SM_LOGIN:
                /* defer the login if the request queue is currently full */
                if (bfa_reqq_full(lps->bfa, lps->reqq)) {
                        bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
                        bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
                } else {
                        bfa_sm_set_state(lps, bfa_lps_sm_login);
                        bfa_lps_send_login(lps);
                }

                /* fdisc distinguishes vport FDISC from base-port FLOGI */
                if (lps->fdisc)
                        bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                                BFA_PL_EID_LOGIN, 0, "FDISC Request");
                else
                        bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                                BFA_PL_EID_LOGIN, 0, "FLOGI Request");
                break;

        case BFA_LPS_SM_LOGOUT:
                /* not logged in -- complete the logout immediately */
                bfa_lps_logout_comp(lps);
                break;

        case BFA_LPS_SM_DELETE:
                bfa_lps_free(lps);
                break;

        case BFA_LPS_SM_RX_CVL:
        case BFA_LPS_SM_OFFLINE:
                /* already offline; nothing to do */
                break;

        case BFA_LPS_SM_FWRSP:
                /*
                 * Could happen when fabric detects loopback and discards
                 * the lps request. Fw will eventually sent out the timeout
                 * Just ignore
                 */
                break;
        case BFA_LPS_SM_SET_N2N_PID:
                /*
                 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
                 * this event. Ignore this event.
                 */
                break;

        default:
                bfa_sm_fault(lps->bfa, event);
        }
}
1222
1223/*
1224 * login is in progress -- awaiting response from firmware
1225 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
        bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);

        switch (event) {
        case BFA_LPS_SM_FWRSP:
                /* lps->status was filled in by bfa_lps_login_rsp() */
                if (lps->status == BFA_STATUS_OK) {
                        bfa_sm_set_state(lps, bfa_lps_sm_online);
                        if (lps->fdisc)
                                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                                        BFA_PL_EID_LOGIN, 0, "FDISC Accept");
                        else
                                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                                        BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
                        /* If N2N, send the assigned PID to FW */
                        bfa_trc(lps->bfa, lps->fport);
                        bfa_trc(lps->bfa, lps->lp_pid);

                        /* non-fabric port with an assigned PID => N2N */
                        if (!lps->fport && lps->lp_pid)
                                bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
                } else {
                        /* login failed: drop back to init */
                        bfa_sm_set_state(lps, bfa_lps_sm_init);
                        if (lps->fdisc)
                                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                                        BFA_PL_EID_LOGIN, 0,
                                        "FDISC Fail (RJT or timeout)");
                        else
                                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                                        BFA_PL_EID_LOGIN, 0,
                                        "FLOGI Fail (RJT or timeout)");
                }
                bfa_lps_login_comp(lps);
                break;

        case BFA_LPS_SM_OFFLINE:
        case BFA_LPS_SM_DELETE:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                break;

        case BFA_LPS_SM_SET_N2N_PID:
                /* response not in yet; the event is re-sent on FWRSP above */
                bfa_trc(lps->bfa, lps->fport);
                bfa_trc(lps->bfa, lps->lp_pid);
                break;

        default:
                bfa_sm_fault(lps->bfa, event);
        }
}
1276
1277/*
1278 * login pending - awaiting space in request queue
1279 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
        bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);

        switch (event) {
        case BFA_LPS_SM_RESUME:
                /* request queue space freed up -- send the deferred login */
                bfa_sm_set_state(lps, bfa_lps_sm_login);
                bfa_lps_send_login(lps);
                break;

        case BFA_LPS_SM_OFFLINE:
        case BFA_LPS_SM_DELETE:
                /* abandon the deferred login and release the queue wait */
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                bfa_reqq_wcancel(&lps->wqe);
                break;

        case BFA_LPS_SM_RX_CVL:
                /*
                 * Login was not even sent out; so when getting out
                 * of this state, it will appear like a login retry
                 * after Clear virtual link
                 */
                break;

        default:
                bfa_sm_fault(lps->bfa, event);
        }
}
1310
1311/*
1312 * login complete
1313 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
        bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);

        switch (event) {
        case BFA_LPS_SM_LOGOUT:
                /* defer the logout if the request queue is currently full */
                if (bfa_reqq_full(lps->bfa, lps->reqq)) {
                        bfa_sm_set_state(lps, bfa_lps_sm_logowait);
                        bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
                } else {
                        bfa_sm_set_state(lps, bfa_lps_sm_logout);
                        bfa_lps_send_logout(lps);
                }
                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                        BFA_PL_EID_LOGO, 0, "Logout");
                break;

        case BFA_LPS_SM_RX_CVL:
                bfa_sm_set_state(lps, bfa_lps_sm_init);

                /* Let the vport module know about this event */
                bfa_lps_cvl_event(lps);
                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                        BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
                break;

        case BFA_LPS_SM_SET_N2N_PID:
                /* push the locally assigned N2N PID to firmware */
                if (bfa_reqq_full(lps->bfa, lps->reqq)) {
                        bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
                        bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
                } else
                        bfa_lps_send_set_n2n_pid(lps);
                break;

        case BFA_LPS_SM_OFFLINE:
        case BFA_LPS_SM_DELETE:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                break;

        default:
                bfa_sm_fault(lps->bfa, event);
        }
}
1359
1360/*
1361 * login complete
1362 */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
        bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);

        switch (event) {
        case BFA_LPS_SM_RESUME:
                /* queue space available -- send the deferred N2N PID set */
                bfa_sm_set_state(lps, bfa_lps_sm_online);
                bfa_lps_send_set_n2n_pid(lps);
                break;

        case BFA_LPS_SM_LOGOUT:
                /*
                 * The pending reqq wait entry stays queued; when it
                 * resumes, logowait sends the logout instead of the
                 * N2N PID set.
                 */
                bfa_sm_set_state(lps, bfa_lps_sm_logowait);
                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                        BFA_PL_EID_LOGO, 0, "Logout");
                break;

        case BFA_LPS_SM_RX_CVL:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                bfa_reqq_wcancel(&lps->wqe);

                /* Let the vport module know about this event */
                bfa_lps_cvl_event(lps);
                bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
                        BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
                break;

        case BFA_LPS_SM_OFFLINE:
        case BFA_LPS_SM_DELETE:
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                bfa_reqq_wcancel(&lps->wqe);
                break;

        default:
                bfa_sm_fault(lps->bfa, event);
        }
}
1401
1402/*
1403 * logout in progress - awaiting firmware response
1404 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
        bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);

        switch (event) {
        case BFA_LPS_SM_FWRSP:
        case BFA_LPS_SM_OFFLINE:
                /* logout finished (or port went away) -- back to init */
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                bfa_lps_logout_comp(lps);
                break;

        case BFA_LPS_SM_DELETE:
                /* deleted mid-logout: no completion callback */
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                break;

        default:
                bfa_sm_fault(lps->bfa, event);
        }
}
1426
1427/*
1428 * logout pending -- awaiting space in request queue
1429 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
        bfa_trc(lps->bfa, lps->bfa_tag);
        bfa_trc(lps->bfa, event);

        switch (event) {
        case BFA_LPS_SM_RESUME:
                /* queue space freed up -- send the deferred logout */
                bfa_sm_set_state(lps, bfa_lps_sm_logout);
                bfa_lps_send_logout(lps);
                break;

        case BFA_LPS_SM_OFFLINE:
        case BFA_LPS_SM_DELETE:
                /* abandon the deferred logout and release the queue wait */
                bfa_sm_set_state(lps, bfa_lps_sm_init);
                bfa_reqq_wcancel(&lps->wqe);
                break;

        default:
                bfa_sm_fault(lps->bfa, event);
        }
}
1452
1453
1454
1455/*
1456 *  lps_pvt BFA LPS private functions
1457 */
1458
1459/*
1460 * return memory requirement
1461 */
1462void
1463bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1464                struct bfa_s *bfa)
1465{
1466        struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1467
1468        if (cfg->drvcfg.min_cfg)
1469                bfa_mem_kva_setup(minfo, lps_kva,
1470                        sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
1471        else
1472                bfa_mem_kva_setup(minfo, lps_kva,
1473                        sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
1474}
1475
1476/*
1477 * bfa module attach at initialization time
1478 */
1479void
1480bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1481        struct bfa_pcidev_s *pcidev)
1482{
1483        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1484        struct bfa_lps_s        *lps;
1485        int                     i;
1486
1487        mod->num_lps = BFA_LPS_MAX_LPORTS;
1488        if (cfg->drvcfg.min_cfg)
1489                mod->num_lps = BFA_LPS_MIN_LPORTS;
1490        else
1491                mod->num_lps = BFA_LPS_MAX_LPORTS;
1492        mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
1493
1494        bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
1495
1496        INIT_LIST_HEAD(&mod->lps_free_q);
1497        INIT_LIST_HEAD(&mod->lps_active_q);
1498        INIT_LIST_HEAD(&mod->lps_login_q);
1499
1500        for (i = 0; i < mod->num_lps; i++, lps++) {
1501                lps->bfa        = bfa;
1502                lps->bfa_tag    = (u8) i;
1503                lps->reqq       = BFA_REQQ_LPS;
1504                bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1505                list_add_tail(&lps->qe, &mod->lps_free_q);
1506        }
1507}
1508
1509/*
1510 * IOC in disabled state -- consider all lps offline
1511 */
void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
        struct bfa_lps_s        *lps;
        struct list_head                *qe, *qen;

        /* offline every active lps */
        list_for_each_safe(qe, qen, &mod->lps_active_q) {
                lps = (struct bfa_lps_s *) qe;
                bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
        }
        /* offline every lps with a login still in flight */
        list_for_each_safe(qe, qen, &mod->lps_login_q) {
                lps = (struct bfa_lps_s *) qe;
                bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
        }
        /* no responses will arrive; fold the login queue into active */
        list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
}
1529
1530/*
1531 * Firmware login response
1532 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
        struct bfa_lps_s        *lps;

        WARN_ON(rsp->bfa_tag >= mod->num_lps);
        lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

        lps->status = rsp->status;
        switch (rsp->status) {
        case BFA_STATUS_OK:
                /* cache the fabric-assigned login parameters */
                lps->fw_tag     = rsp->fw_tag;
                lps->fport      = rsp->f_port;
                if (lps->fport)
                        lps->lp_pid = rsp->lp_pid;
                lps->npiv_en    = rsp->npiv_en;
                lps->pr_bbcred  = be16_to_cpu(rsp->bb_credit);
                lps->pr_pwwn    = rsp->port_name;
                lps->pr_nwwn    = rsp->node_name;
                lps->auth_req   = rsp->auth_req;
                lps->lp_mac     = rsp->lp_mac;
                lps->brcd_switch = rsp->brcd_switch;
                lps->fcf_mac    = rsp->fcf_mac;

                break;

        case BFA_STATUS_FABRIC_RJT:
                /* keep the LS_RJT reason/explanation for the caller */
                lps->lsrjt_rsn = rsp->lsrjt_rsn;
                lps->lsrjt_expl = rsp->lsrjt_expl;

                break;

        case BFA_STATUS_EPROTOCOL:
                lps->ext_status = rsp->ext_status;

                break;

        case BFA_STATUS_VPORT_MAX:
                /*
                 * No vport resources left: fail the next ext_status
                 * logins still queued behind this one as well.
                 */
                if (rsp->ext_status)
                        bfa_lps_no_res(lps, rsp->ext_status);
                break;

        default:
                /* Nothing to do with other status */
                break;
        }

        /* login no longer in flight: move back to the active list */
        list_del(&lps->qe);
        list_add_tail(&lps->qe, &mod->lps_active_q);
        bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
1585
1586static void
1587bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
1588{
1589        struct bfa_s            *bfa = first_lps->bfa;
1590        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1591        struct list_head        *qe, *qe_next;
1592        struct bfa_lps_s        *lps;
1593
1594        bfa_trc(bfa, count);
1595
1596        qe = bfa_q_next(first_lps);
1597
1598        while (count && qe) {
1599                qe_next = bfa_q_next(qe);
1600                lps = (struct bfa_lps_s *)qe;
1601                bfa_trc(bfa, lps->bfa_tag);
1602                lps->status = first_lps->status;
1603                list_del(&lps->qe);
1604                list_add_tail(&lps->qe, &mod->lps_active_q);
1605                bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1606                qe = qe_next;
1607                count--;
1608        }
1609}
1610
1611/*
1612 * Firmware logout response
1613 */
static void
bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
{
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
        struct bfa_lps_s        *lps;

        /* rsp->bfa_tag indexes the lps array; out-of-range means fw bug */
        WARN_ON(rsp->bfa_tag >= mod->num_lps);
        lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

        bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
1625
1626/*
1627 * Firmware received a Clear virtual link request (for FCoE)
1628 */
static void
bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
{
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
        struct bfa_lps_s        *lps;

        /* forward the clear-virtual-link to the affected lport's SM */
        lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);

        bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
}
1639
1640/*
1641 * Space is available in request queue, resume queueing request to firmware.
1642 */
static void
bfa_lps_reqq_resume(void *lps_arg)
{
        struct bfa_lps_s        *lps = lps_arg;

        /* reqq-wait callback: let the state machine retry the request */
        bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
}
1650
1651/*
1652 * lps is freed -- triggered by vport delete
1653 */
1654static void
1655bfa_lps_free(struct bfa_lps_s *lps)
1656{
1657        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(lps->bfa);
1658
1659        lps->lp_pid = 0;
1660        list_del(&lps->qe);
1661        list_add_tail(&lps->qe, &mod->lps_free_q);
1662}
1663
1664/*
1665 * send login request to firmware
1666 */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(lps->bfa);
        struct bfi_lps_login_req_s      *m;

        /* callers guarantee queue space (checked via bfa_reqq_full) */
        m = bfa_reqq_next(lps->bfa, lps->reqq);
        WARN_ON(!m);

        bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
                bfa_fn_lpu(lps->bfa));

        m->bfa_tag      = lps->bfa_tag;
        m->alpa         = lps->alpa;
        m->pdu_size     = cpu_to_be16(lps->pdusz);
        m->pwwn         = lps->pwwn;
        m->nwwn         = lps->nwwn;
        m->fdisc        = lps->fdisc;
        m->auth_en      = lps->auth_en;

        bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
        /* track the in-flight login on the login queue */
        list_del(&lps->qe);
        list_add_tail(&lps->qe, &mod->lps_login_q);
}
1691
/*
 * send logout request to firmware
 *
 * Request queue space was reserved before this is called, so
 * bfa_reqq_next() must not return NULL here.
 */
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
		bfa_fn_lpu(lps->bfa));

	/* logout is keyed by the firmware-assigned tag, not the bfa tag */
	m->fw_tag = lps->fw_tag;
	m->port_name = lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}
1710
/*
 * send n2n pid set request to firmware
 *
 * Pushes the locally assigned N2N port id (set via
 * bfa_lps_set_n2n_pid()) down to firmware. Request queue space was
 * reserved before this is called, so bfa_reqq_next() must not fail.
 */
static void
bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
{
	struct bfi_lps_n2n_pid_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;
	m->lp_pid = lps->lp_pid;
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}
1729
/*
 * Indirect login completion handler for non-fcs
 *
 * @arg:      the lps whose login completed
 * @complete: BFA_TRUE to deliver the completion; BFA_FALSE when the
 *            queued callback is being flushed, in which case nothing
 *            is reported
 */
static void
bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps	= arg;

	if (!complete)
		return;

	/* FDISC (vport) and FLOGI (base port) use separate completion hooks */
	if (lps->fdisc)
		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
	else
		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
}
1746
1747/*
1748 * Login completion handler -- direct call for fcs, queue for others
1749 */
1750static void
1751bfa_lps_login_comp(struct bfa_lps_s *lps)
1752{
1753        if (!lps->bfa->fcs) {
1754                bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1755                        lps);
1756                return;
1757        }
1758
1759        if (lps->fdisc)
1760                bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1761        else
1762                bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1763}
1764
/*
 * Indirect logout completion handler for non-fcs
 *
 * @arg:      the lps whose logout completed
 * @complete: BFA_TRUE to deliver the completion; BFA_FALSE when the
 *            queued callback is being flushed
 */
static void
bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps	= arg;

	if (!complete)
		return;

	/* FDISC logout and base-port FLOGO use separate completion hooks */
	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
	else
		bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
}
1781
/*
 * Logout completion handler -- direct call for fcs, queue for others
 *
 * NOTE(review): the direct (fcs) path completes only FDISC logouts;
 * base-port FLOGO completion happens only on the queued (non-fcs) path
 * via bfa_lps_logout_comp_cb(). Presumably intentional -- confirm
 * against the fcs callers.
 */
static void
bfa_lps_logout_comp(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
			lps);
		return;
	}
	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}
1796
/*
 * Clear virtual link completion handler for non-fcs
 *
 * @arg:      the lps that received the CVL event
 * @complete: BFA_TRUE to deliver the event; BFA_FALSE when the queued
 *            callback is being flushed
 */
static void
bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
{
	struct bfa_lps_s *lps	= arg;

	if (!complete)
		return;

	/* Clear virtual link to base port will result in link down */
	if (lps->fdisc)
		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
}
1812
1813/*
1814 * Received Clear virtual link event --direct call for fcs,
1815 * queue for others
1816 */
1817static void
1818bfa_lps_cvl_event(struct bfa_lps_s *lps)
1819{
1820        if (!lps->bfa->fcs) {
1821                bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1822                        lps);
1823                return;
1824        }
1825
1826        /* Clear virtual link to base port will result in link down */
1827        if (lps->fdisc)
1828                bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1829}
1830
1831
1832
1833/*
1834 *  lps_public BFA LPS public functions
1835 */
1836
1837u32
1838bfa_lps_get_max_vport(struct bfa_s *bfa)
1839{
1840        if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1841                return BFA_LPS_MAX_VPORTS_SUPP_CT;
1842        else
1843                return BFA_LPS_MAX_VPORTS_SUPP_CB;
1844}
1845
/*
 * Allocate a lport service tag.
 *
 * Returns NULL when the free pool is exhausted. On success the lps is
 * moved to the active queue and its state machine is initialized.
 */
struct bfa_lps_s  *
bfa_lps_alloc(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps = NULL;

	/* bfa_q_deq() assigns the dequeued element (or NULL) to lps */
	bfa_q_deq(&mod->lps_free_q, &lps);

	if (lps == NULL)
		return NULL;

	list_add_tail(&lps->qe, &mod->lps_active_q);

	bfa_sm_set_state(lps, bfa_lps_sm_init);
	return lps;
}
1865
/*
 * Free lport service tag. This can be called anytime after an alloc.
 * No need to wait for any pending login/logout completions.
 *
 * The actual cleanup is driven by the lps state machine.
 */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
1875
/*
 * Initiate a lport login.
 *
 * @uarg:    caller cookie returned in the completion callback
 * @alpa:    loop address to log in with
 * @pdusz:   login PDU size
 * @pwwn:    port world-wide name
 * @nwwn:    node world-wide name
 * @auth_en: enable authentication
 *
 * Stores the parameters on the lps and kicks the state machine; the
 * result is reported through bfa_cb_lps_flogi_comp().
 */
void
bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
{
	lps->uarg	= uarg;
	lps->alpa	= alpa;
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_FALSE;
	lps->auth_en	= auth_en;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
1892
/*
 * Initiate a lport fdisc login.
 *
 * Same as bfa_lps_flogi() but marks the login as an FDISC (vport);
 * alpa is forced to 0 and authentication is disabled. The result is
 * reported through bfa_cb_lps_fdisc_comp().
 */
void
bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
	wwn_t nwwn)
{
	lps->uarg	= uarg;
	lps->alpa	= 0;
	lps->pdusz	= pdusz;
	lps->pwwn	= pwwn;
	lps->nwwn	= nwwn;
	lps->fdisc	= BFA_TRUE;
	lps->auth_en	= BFA_FALSE;
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
1909
1910
/*
 * Initiate a lport FDISC logout.
 *
 * Completion is reported through bfa_cb_lps_fdisclogo_comp().
 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
1919
1920u8
1921bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1922{
1923        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1924
1925        return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1926}
1927
1928/*
1929 * Return lport services tag given the pid
1930 */
1931u8
1932bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1933{
1934        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1935        struct bfa_lps_s        *lps;
1936        int                     i;
1937
1938        for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1939                if (lps->lp_pid == pid)
1940                        return lps->bfa_tag;
1941        }
1942
1943        /* Return base port tag anyway */
1944        return 0;
1945}
1946
1947
1948/*
1949 * return port id assigned to the base lport
1950 */
1951u32
1952bfa_lps_get_base_pid(struct bfa_s *bfa)
1953{
1954        struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1955
1956        return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1957}
1958
/*
 * Set PID in case of n2n (which is assigned during PLOGI)
 *
 * Caches the pid on the lps and notifies the state machine, which
 * forwards it to firmware via bfa_lps_send_set_n2n_pid().
 */
void
bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, n2n_pid);

	lps->lp_pid = n2n_pid;
	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
}
1971
/*
 * LPS firmware message class handler.
 *
 * Demultiplexes BFI_MC_LPS messages coming from firmware to the
 * matching response/event handler. An unknown message id is traced
 * and triggers a WARN.
 */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u	msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_LPS_I2H_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_I2H_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_I2H_CVL_EVENT:
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
2001
/*
 * Post a port-level asynchronous event notification (AEN) for this
 * fcport. If no AEN entry can be obtained, the event is silently
 * dropped.
 */
static void
bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	struct bfa_aen_entry_s	*aen_entry;

	/* macro that assigns a free AEN entry to aen_entry (NULL if none) */
	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
	aen_entry->aen_data.port.pwwn = fcport->pwwn;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
				  BFA_AEN_CAT_PORT, event);
}
2019
2020/*
2021 * FC PORT state machine functions
2022 */
/*
 * Uninitialized state: entered before the IOC/BFA bring-up sequence has
 * started the port.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		fcport->use_flash_cfg = BFA_TRUE;

		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			/* no request queue space: wait for QRESUME */
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will be a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2070
/*
 * Port enable is pending: waiting for request queue space (QRESUME)
 * before the enable can be sent to firmware.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		/* cancel the pending queue-wait before stopping */
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2134
/*
 * Port enable has been sent to firmware: waiting for the firmware
 * response or the first link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		/* event callback must have been registered by now */
		WARN_ON(!fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2196
/*
 * Port is enabled but the link is down: waiting for a link up event.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		WARN_ON(!fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		/* FCoE mode: log the FIP FCF discovery outcome */
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);

		/* If QoS is enabled and it is not online, send AEN */
		if (fcport->cfg.qos_enabled &&
		    fcport->qos_attr.state != BFA_QOS_ONLINE)
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2288
/*
 * Port is enabled and the link is up.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		/* disabling a live link: report offline then disabled */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* deliberate link down vs. lost connectivity */
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2387
/*
 * Port disable is pending: waiting for request queue space (QRESUME)
 * before the disable can be sent to firmware.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* enable arrived while disable is queued: toggle both */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2438
/*
 * A disable followed by an enable are both pending on request queue
 * space: on QRESUME the disable is sent first, then the enable.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		/* immediately queue the follow-up enable */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* cancel the pending enable; fall back to plain disable wait */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2485
/*
 * Port disable has been sent to firmware: waiting for the firmware
 * response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2540
/*
 * Port is disabled: waiting for an enable request (or diagnostic port
 * enable).
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTENABLE:
		/* enter diagnostic (d-port) mode */
		bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
		break;

	case BFA_FCPORT_SM_DDPORTENABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_ddport);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2597
/*
 * Port state machine is stopped. Only a start event re-enters the enable
 * sequence; all other events are ignored.
 */
static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* Re-issue the enable; wait for request-queue space if full. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all other events.
		 */
		;
	}
}
2620
/*
 * Port is enabled. IOC is down/failed.
 */
static void
bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* IOC is back: re-issue the enable to firmware. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}
2646
/*
 * Port is disabled. IOC is down/failed.
 */
static void
bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* IOC recovered; port remains administratively disabled. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Enable requested while IOC is down: move to iocdown
		 * ("enabled, IOC down") so the enable happens on recovery.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}
2672
/*
 * Port is in dport mode. Normal enable/disable/start are ignored until
 * dport is disabled again.
 */
static void
bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port that is dport
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTDISABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2704
/*
 * Port is in ddport mode. Disable (or an explicit ddport disable) returns
 * the port to the disabled state; other enable/start events are ignored.
 */
static void
bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_DDPORTDISABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DPORTDISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port that is ddport
		 * (plain comment — not kernel-doc, so no "slash-star-star")
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2738
/*
 * Port detected an FAA misconfiguration. Only disable, stop and h/w
 * failure are acted upon; enable/start are ignored until the
 * misconfiguration is resolved.
 */
static void
bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
			    enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port as there is FAA misconfig
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Tell firmware to disable, drop link info and notify. */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* IOC failed: drop link info and notify link-down. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2781
2782/*
2783 * Link state is down
2784 */
2785static void
2786bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2787                enum bfa_fcport_ln_sm_event event)
2788{
2789        bfa_trc(ln->fcport->bfa, event);
2790
2791        switch (event) {
2792        case BFA_FCPORT_LN_SM_LINKUP:
2793                bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2794                bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2795                break;
2796
2797        default:
2798                bfa_sm_fault(ln->fcport->bfa, event);
2799        }
2800}
2801
/*
 * Link state is waiting for down notification
 */
static void
bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Link came back before the down callback completed. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down callback acknowledged; settle in the down state. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2824
/*
 * Link state is waiting for down notification and there is a pending up
 */
static void
bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Pending up cancelled; back to plain down-notification wait. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down callback done; now deliver the pending link-up. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2848
2849/*
2850 * Link state is up
2851 */
2852static void
2853bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2854                enum bfa_fcport_ln_sm_event event)
2855{
2856        bfa_trc(ln->fcport->bfa, event);
2857
2858        switch (event) {
2859        case BFA_FCPORT_LN_SM_LINKDOWN:
2860                bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2861                bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2862                break;
2863
2864        default:
2865                bfa_sm_fault(ln->fcport->bfa, event);
2866        }
2867}
2868
/*
 * Link state is waiting for up notification
 */
static void
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Link dropped before the up callback completed. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up callback acknowledged; settle in the up state. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2891
/*
 * Link state is waiting for up notification and there is a pending down
 */
static void
bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Another up arrived; remember it behind the pending down. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up callback done; now deliver the pending link-down. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2915
/*
 * Link state is waiting for up notification and there are pending down and up
 */
static void
bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Newest up cancelled; only the down remains pending. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up callback done; deliver the down, keep the up pending. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2939
2940static void
2941__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2942{
2943        struct bfa_fcport_ln_s *ln = cbarg;
2944
2945        if (complete)
2946                ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2947        else
2948                bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2949}
2950
2951/*
2952 * Send SCN notification to upper layers.
2953 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2954 */
2955static void
2956bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2957        bfa_boolean_t trunk)
2958{
2959        if (fcport->cfg.trunked && !trunk)
2960                return;
2961
2962        switch (event) {
2963        case BFA_PORT_LINKUP:
2964                bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2965                break;
2966        case BFA_PORT_LINKDOWN:
2967                bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2968                break;
2969        default:
2970                WARN_ON(1);
2971        }
2972}
2973
2974static void
2975bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2976{
2977        struct bfa_fcport_s *fcport = ln->fcport;
2978
2979        if (fcport->bfa->fcs) {
2980                fcport->event_cbfn(fcport->event_cbarg, event);
2981                bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2982        } else {
2983                ln->ln_event = event;
2984                bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2985                        __bfa_cb_fcport_event, ln);
2986        }
2987}
2988
2989#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
2990                                                        BFA_CACHELINE_SZ))
2991
2992void
2993bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
2994                   struct bfa_s *bfa)
2995{
2996        struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
2997
2998        bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
2999}
3000
3001static void
3002bfa_fcport_qresume(void *cbarg)
3003{
3004        struct bfa_fcport_s *fcport = cbarg;
3005
3006        bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
3007}
3008
3009static void
3010bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
3011{
3012        struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
3013
3014        fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
3015        fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
3016        fcport->stats = (union bfa_fcport_stats_u *)
3017                                bfa_mem_dma_virt(fcport_dma);
3018}
3019
/*
 * Memory initialization.
 */
void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;

	fcport->bfa = bfa;
	ln->fcport = fcport;

	bfa_fcport_mem_claim(fcport);

	/* Both state machines start in their idle states. */
	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/*
	 * initialize time stamp for stats reset
	 */
	fcport->stats_reset_time = ktime_get_seconds();
	fcport->stats_dma_ready = BFA_FALSE;

	/*
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	port_cfg->maxfrsize = 0;	/* 0 => take IOC default, see bfa_fcport_init() */

	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
	port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
	port_cfg->qos_bw.med = BFA_QOS_BW_MED;
	port_cfg->qos_bw.low = BFA_QOS_BW_LOW;

	fcport->fec_state = BFA_FEC_OFFLINE;

	INIT_LIST_HEAD(&fcport->stats_pending_q);
	INIT_LIST_HEAD(&fcport->statsclr_pending_q);

	/* Re-run the SM when request-queue space frees up. */
	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
3065
3066void
3067bfa_fcport_start(struct bfa_s *bfa)
3068{
3069        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
3070}
3071
/*
 * Called when IOC failure is detected.
 */
void
bfa_fcport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* Fail the port SM, then push link-down to trunked upper layers. */
	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
	bfa_trunk_iocdisable(bfa);
}
3083
3084/*
3085 * Update loop info in fcport for SCN online
3086 */
3087static void
3088bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
3089                        struct bfa_fcport_loop_info_s *loop_info)
3090{
3091        fcport->myalpa = loop_info->myalpa;
3092        fcport->alpabm_valid =
3093                        loop_info->alpabm_val;
3094        memcpy(fcport->alpabm.alpa_bm,
3095                        loop_info->alpabm.alpa_bm,
3096                        sizeof(struct fc_alpabm_s));
3097}
3098
/*
 * Capture link attributes (speed, topology, QoS, BB-CR, FEC, trunk and
 * FCoE VLAN) from the firmware link-state event saved in event_arg.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* Loop topology carries only loop info; nothing below applies. */
	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
		bfa_fcport_update_loop_info(fcport,
				&pevent->link_state.attr.loop_info);
		return;
	}

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;

	if (fcport->cfg.bb_cr_enabled)
		fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr;

	fcport->fec_state = pevent->link_state.fec_state;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan =
		be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
3136
/*
 * Clear cached link attributes when the link goes away.
 */
static void
bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
	fcport->fec_state = BFA_FEC_OFFLINE;
}
3144
3145/*
3146 * Send port enable message to firmware.
3147 */
3148static bfa_boolean_t
3149bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3150{
3151        struct bfi_fcport_enable_req_s *m;
3152
3153        /*
3154         * Increment message tag before queue check, so that responses to old
3155         * requests are discarded.
3156         */
3157        fcport->msgtag++;
3158
3159        /*
3160         * check for room in queue to send request now
3161         */
3162        m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3163        if (!m) {
3164                bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3165                                                        &fcport->reqq_wait);
3166                return BFA_FALSE;
3167        }
3168
3169        bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
3170                        bfa_fn_lpu(fcport->bfa));
3171        m->nwwn = fcport->nwwn;
3172        m->pwwn = fcport->pwwn;
3173        m->port_cfg = fcport->cfg;
3174        m->msgtag = fcport->msgtag;
3175        m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
3176         m->use_flash_cfg = fcport->use_flash_cfg;
3177        bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3178        bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3179        bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3180
3181        /*
3182         * queue I/O message to firmware
3183         */
3184        bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3185        return BFA_TRUE;
3186}
3187
/*
 * Send port disable message to firmware.
 *
 * Returns BFA_TRUE when the request was queued; BFA_FALSE when the
 * request queue is full (a queue-wait element is armed in that case).
 */
static	bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
			bfa_fn_lpu(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);

	return BFA_TRUE;
}
3223
3224static void
3225bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3226{
3227        fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
3228        fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
3229
3230        bfa_trc(fcport->bfa, fcport->pwwn);
3231        bfa_trc(fcport->bfa, fcport->nwwn);
3232}
3233
3234static void
3235bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3236        struct bfa_qos_stats_s *s)
3237{
3238        u32     *dip = (u32 *) d;
3239        __be32  *sip = (__be32 *) s;
3240        int             i;
3241
3242        /* Now swap the 32 bit fields */
3243        for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3244                dip[i] = be32_to_cpu(sip[i]);
3245}
3246
/*
 * Convert FCoE statistics from firmware (big-endian) to host order.
 * Fields are processed as pairs of 32-bit words; on little-endian hosts
 * the two words of each pair are additionally exchanged.
 * NOTE(review): presumably because the counters are 64-bit quantities —
 * confirm against the layout of struct bfa_fcoe_stats_s.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
	struct bfa_fcoe_stats_s *s)
{
	u32	*dip = (u32 *) d;
	__be32	*sip = (__be32 *) s;
	int		i;

	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
	     i = i + 2) {
#ifdef __BIG_ENDIAN
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		/* Swap the adjacent words as well as their bytes. */
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}
3266
/*
 * Completion handler for a statistics-get request: byte-swap the DMA'ed
 * stats into every pending requester's buffer and complete each with the
 * final status.  On cancellation (!complete) the pending queue is reset.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;
	union bfa_fcport_stats_u *ret;

	if (complete) {
		time64_t time = ktime_get_seconds();

		list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
			bfa_q_deq(&fcport->stats_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			if (fcport->stats_status == BFA_STATUS_OK) {
				ret = (union bfa_fcport_stats_u *)cb->data;
				/* Swap FC QoS or FCoE stats */
				if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
					bfa_fcport_qos_stats_swap(&ret->fcqos,
							&fcport->stats->fcqos);
				else {
					bfa_fcport_fcoe_stats_swap(&ret->fcoe,
							&fcport->stats->fcoe);
					/* seconds since the last stats reset */
					ret->fcoe.secs_reset =
						time - fcport->stats_reset_time;
				}
			}
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
					fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		INIT_LIST_HEAD(&fcport->stats_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3303
/*
 * Statistics-get request timed out: cancel any request-queue wait and
 * complete all pending requesters with BFA_STATUS_ETIMER.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
}
3319
/*
 * Post a statistics-get request to firmware.  If the request queue is
 * full, arm a wait element so this function is retried on queue resume.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3343
/*
 * Completion handler for a statistics-clear request: restart the reset
 * time stamp and complete all pending requesters with the final status.
 * On cancellation (!complete) the pending queue is reset.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;

	if (complete) {
		/*
		 * re-initialize time stamp for stats reset
		 */
		fcport->stats_reset_time = ktime_get_seconds();
		list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
			bfa_q_deq(&fcport->statsclr_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
						fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		INIT_LIST_HEAD(&fcport->statsclr_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3368
/*
 * Statistics-clear request timed out: cancel any request-queue wait and
 * complete all pending requesters with BFA_STATUS_ETIMER.
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
}
3384
/*
 * Post a statistics-clear request to firmware.  If the request queue is
 * full, arm a wait element so this function is retried on queue resume.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
						&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3408
/*
 * Handle trunk SCN event from firmware.
 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;	/* bit i set => trunk link i reported up */

	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
		   scn->trunk_state != BFA_TRUNK_OFFLINE);

	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);

	/*
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];

		/* Copy per-link attributes from the firmware event. */
		lattr->link_state = tlink->state;
		lattr->trunk_wwn  = tlink->trunk_wwn;
		lattr->fctl       = tlink->fctl;
		lattr->speed      = tlink->speed;
		lattr->deskew     = be32_to_cpu(tlink->deskew);

		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			fcport->speed    = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}

		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}

	/* Log which of the two trunk links are up. */
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}

	/*
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
	}
}
3487
3488static void
3489bfa_trunk_iocdisable(struct bfa_s *bfa)
3490{
3491        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3492        int i = 0;
3493
3494        /*
3495         * In trunked mode, notify upper layers that link is down
3496         */
3497        if (fcport->cfg.trunked) {
3498                if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3499                        bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3500
3501                fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3502                fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3503                for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3504                        fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3505                        fcport->trunk.attr.link_attr[i].fctl =
3506                                                BFA_TRUNK_LINK_FCTL_NORMAL;
3507                        fcport->trunk.attr.link_attr[i].link_state =
3508                                                BFA_TRUNK_LINK_STATE_DN_LINKDN;
3509                        fcport->trunk.attr.link_attr[i].speed =
3510                                                BFA_PORT_SPEED_UNKNOWN;
3511                        fcport->trunk.attr.link_attr[i].deskew = 0;
3512                }
3513        }
3514}
3515
3516/*
3517 * Called to initialize port attributes
3518 */
3519void
3520bfa_fcport_init(struct bfa_s *bfa)
3521{
3522        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3523
3524        /*
3525         * Initialize port attributes from IOC hardware data.
3526         */
3527        bfa_fcport_set_wwns(fcport);
3528        if (fcport->cfg.maxfrsize == 0)
3529                fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3530        fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3531        fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3532
3533        if (bfa_fcport_is_pbcdisabled(bfa))
3534                bfa->modules.port.pbc_disabled = BFA_TRUE;
3535
3536        WARN_ON(!fcport->cfg.maxfrsize);
3537        WARN_ON(!fcport->cfg.rx_bbcredit);
3538        WARN_ON(!fcport->speed_sup);
3539}
3540
3541/*
3542 * Firmware message handler.
3543 */
3544void
3545bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3546{
3547        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3548        union bfi_fcport_i2h_msg_u i2hmsg;
3549
3550        i2hmsg.msg = msg;
3551        fcport->event_arg.i2hmsg = i2hmsg;
3552
3553        bfa_trc(bfa, msg->mhdr.msg_id);
3554        bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
3555
3556        switch (msg->mhdr.msg_id) {
3557        case BFI_FCPORT_I2H_ENABLE_RSP:
3558                if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
3559
3560                        fcport->stats_dma_ready = BFA_TRUE;
3561                        if (fcport->use_flash_cfg) {
3562                                fcport->cfg = i2hmsg.penable_rsp->port_cfg;
3563                                fcport->cfg.maxfrsize =
3564                                        cpu_to_be16(fcport->cfg.maxfrsize);
3565                                fcport->cfg.path_tov =
3566                                        cpu_to_be16(fcport->cfg.path_tov);
3567                                fcport->cfg.q_depth =
3568                                        cpu_to_be16(fcport->cfg.q_depth);
3569
3570                                if (fcport->cfg.trunked)
3571                                        fcport->trunk.attr.state =
3572                                                BFA_TRUNK_OFFLINE;
3573                                else
3574                                        fcport->trunk.attr.state =
3575                                                BFA_TRUNK_DISABLED;
3576                                fcport->qos_attr.qos_bw =
3577                                        i2hmsg.penable_rsp->port_cfg.qos_bw;
3578                                fcport->use_flash_cfg = BFA_FALSE;
3579                        }
3580
3581                        if (fcport->cfg.qos_enabled)
3582                                fcport->qos_attr.state = BFA_QOS_OFFLINE;
3583                        else
3584                                fcport->qos_attr.state = BFA_QOS_DISABLED;
3585
3586                        fcport->qos_attr.qos_bw_op =
3587                                        i2hmsg.penable_rsp->port_cfg.qos_bw;
3588
3589                        if (fcport->cfg.bb_cr_enabled)
3590                                fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
3591                        else
3592                                fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
3593
3594                        bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3595                }
3596                break;
3597
3598        case BFI_FCPORT_I2H_DISABLE_RSP:
3599                if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3600                        bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3601                break;
3602
3603        case BFI_FCPORT_I2H_EVENT:
3604                if (fcport->cfg.bb_cr_enabled)
3605                        fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
3606                else
3607                        fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
3608
3609                if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3610                        bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3611                else {
3612                        if (i2hmsg.event->link_state.linkstate_rsn ==
3613                            BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
3614                                bfa_sm_send_event(fcport,
3615                                                  BFA_FCPORT_SM_FAA_MISCONFIG);
3616                        else
3617                                bfa_sm_send_event(fcport,
3618                                                  BFA_FCPORT_SM_LINKDOWN);
3619                }
3620                fcport->qos_attr.qos_bw_op =
3621                                i2hmsg.event->link_state.qos_attr.qos_bw_op;
3622                break;
3623
3624        case BFI_FCPORT_I2H_TRUNK_SCN:
3625                bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3626                break;
3627
3628        case BFI_FCPORT_I2H_STATS_GET_RSP:
3629                /*
3630                 * check for timer pop before processing the rsp
3631                 */
3632                if (list_empty(&fcport->stats_pending_q) ||
3633                    (fcport->stats_status == BFA_STATUS_ETIMER))
3634                        break;
3635
3636                bfa_timer_stop(&fcport->timer);
3637                fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3638                __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3639                break;
3640
3641        case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3642                /*
3643                 * check for timer pop before processing the rsp
3644                 */
3645                if (list_empty(&fcport->statsclr_pending_q) ||
3646                    (fcport->stats_status == BFA_STATUS_ETIMER))
3647                        break;
3648
3649                bfa_timer_stop(&fcport->timer);
3650                fcport->stats_status = BFA_STATUS_OK;
3651                __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3652                break;
3653
3654        case BFI_FCPORT_I2H_ENABLE_AEN:
3655                bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3656                break;
3657
3658        case BFI_FCPORT_I2H_DISABLE_AEN:
3659                bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3660                break;
3661
3662        default:
3663                WARN_ON(1);
3664        break;
3665        }
3666}
3667
3668/*
3669 * Registered callback for port events.
3670 */
3671void
3672bfa_fcport_event_register(struct bfa_s *bfa,
3673                                void (*cbfn) (void *cbarg,
3674                                enum bfa_port_linkstate event),
3675                                void *cbarg)
3676{
3677        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3678
3679        fcport->event_cbfn = cbfn;
3680        fcport->event_cbarg = cbarg;
3681}
3682
3683bfa_status_t
3684bfa_fcport_enable(struct bfa_s *bfa)
3685{
3686        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3687
3688        if (bfa_fcport_is_pbcdisabled(bfa))
3689                return BFA_STATUS_PBC;
3690
3691        if (bfa_ioc_is_disabled(&bfa->ioc))
3692                return BFA_STATUS_IOC_DISABLED;
3693
3694        if (fcport->diag_busy)
3695                return BFA_STATUS_DIAG_BUSY;
3696
3697        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3698        return BFA_STATUS_OK;
3699}
3700
3701bfa_status_t
3702bfa_fcport_disable(struct bfa_s *bfa)
3703{
3704        if (bfa_fcport_is_pbcdisabled(bfa))
3705                return BFA_STATUS_PBC;
3706
3707        if (bfa_ioc_is_disabled(&bfa->ioc))
3708                return BFA_STATUS_IOC_DISABLED;
3709
3710        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3711        return BFA_STATUS_OK;
3712}
3713
3714/* If PBC is disabled on port, return error */
3715bfa_status_t
3716bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3717{
3718        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3719        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3720        struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3721
3722        if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3723                bfa_trc(bfa, fcport->pwwn);
3724                return BFA_STATUS_PBC;
3725        }
3726        return BFA_STATUS_OK;
3727}
3728
3729/*
3730 * Configure port speed.
3731 */
3732bfa_status_t
3733bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3734{
3735        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3736
3737        bfa_trc(bfa, speed);
3738
3739        if (fcport->cfg.trunked == BFA_TRUE)
3740                return BFA_STATUS_TRUNK_ENABLED;
3741        if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
3742                        (speed == BFA_PORT_SPEED_16GBPS))
3743                return BFA_STATUS_UNSUPP_SPEED;
3744        if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3745                bfa_trc(bfa, fcport->speed_sup);
3746                return BFA_STATUS_UNSUPP_SPEED;
3747        }
3748
3749        /* Port speed entered needs to be checked */
3750        if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3751                /* For CT2, 1G is not supported */
3752                if ((speed == BFA_PORT_SPEED_1GBPS) &&
3753                    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3754                        return BFA_STATUS_UNSUPP_SPEED;
3755
3756                /* Already checked for Auto Speed and Max Speed supp */
3757                if (!(speed == BFA_PORT_SPEED_1GBPS ||
3758                      speed == BFA_PORT_SPEED_2GBPS ||
3759                      speed == BFA_PORT_SPEED_4GBPS ||
3760                      speed == BFA_PORT_SPEED_8GBPS ||
3761                      speed == BFA_PORT_SPEED_16GBPS ||
3762                      speed == BFA_PORT_SPEED_AUTO))
3763                        return BFA_STATUS_UNSUPP_SPEED;
3764        } else {
3765                if (speed != BFA_PORT_SPEED_10GBPS)
3766                        return BFA_STATUS_UNSUPP_SPEED;
3767        }
3768
3769        fcport->cfg.speed = speed;
3770
3771        return BFA_STATUS_OK;
3772}
3773
3774/*
3775 * Get current speed.
3776 */
3777enum bfa_port_speed
3778bfa_fcport_get_speed(struct bfa_s *bfa)
3779{
3780        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3781
3782        return fcport->speed;
3783}
3784
3785/*
3786 * Configure port topology.
3787 */
3788bfa_status_t
3789bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3790{
3791        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3792
3793        bfa_trc(bfa, topology);
3794        bfa_trc(bfa, fcport->cfg.topology);
3795
3796        switch (topology) {
3797        case BFA_PORT_TOPOLOGY_P2P:
3798                break;
3799
3800        case BFA_PORT_TOPOLOGY_LOOP:
3801                if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
3802                        (fcport->qos_attr.state != BFA_QOS_DISABLED))
3803                        return BFA_STATUS_ERROR_QOS_ENABLED;
3804                if (fcport->cfg.ratelimit != BFA_FALSE)
3805                        return BFA_STATUS_ERROR_TRL_ENABLED;
3806                if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
3807                        (fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
3808                        return BFA_STATUS_ERROR_TRUNK_ENABLED;
3809                if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
3810                        (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
3811                        return BFA_STATUS_UNSUPP_SPEED;
3812                if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
3813                        return BFA_STATUS_LOOP_UNSUPP_MEZZ;
3814                if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
3815                        return BFA_STATUS_DPORT_ERR;
3816                if (bfa_fcport_is_ddport(bfa) != BFA_FALSE)
3817                        return BFA_STATUS_DPORT_ERR;
3818                break;
3819
3820        case BFA_PORT_TOPOLOGY_AUTO:
3821                break;
3822
3823        default:
3824                return BFA_STATUS_EINVAL;
3825        }
3826
3827        fcport->cfg.topology = topology;
3828        return BFA_STATUS_OK;
3829}
3830
3831/*
3832 * Get current topology.
3833 */
3834enum bfa_port_topology
3835bfa_fcport_get_topology(struct bfa_s *bfa)
3836{
3837        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3838
3839        return fcport->topology;
3840}
3841
3842/**
3843 * Get config topology.
3844 */
3845enum bfa_port_topology
3846bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
3847{
3848        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3849
3850        return fcport->cfg.topology;
3851}
3852
3853bfa_status_t
3854bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3855{
3856        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3857
3858        bfa_trc(bfa, alpa);
3859        bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3860        bfa_trc(bfa, fcport->cfg.hardalpa);
3861
3862        fcport->cfg.cfg_hardalpa = BFA_TRUE;
3863        fcport->cfg.hardalpa = alpa;
3864
3865        return BFA_STATUS_OK;
3866}
3867
3868bfa_status_t
3869bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3870{
3871        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3872
3873        bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3874        bfa_trc(bfa, fcport->cfg.hardalpa);
3875
3876        fcport->cfg.cfg_hardalpa = BFA_FALSE;
3877        return BFA_STATUS_OK;
3878}
3879
3880bfa_boolean_t
3881bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3882{
3883        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3884
3885        *alpa = fcport->cfg.hardalpa;
3886        return fcport->cfg.cfg_hardalpa;
3887}
3888
3889u8
3890bfa_fcport_get_myalpa(struct bfa_s *bfa)
3891{
3892        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3893
3894        return fcport->myalpa;
3895}
3896
3897bfa_status_t
3898bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3899{
3900        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3901
3902        bfa_trc(bfa, maxfrsize);
3903        bfa_trc(bfa, fcport->cfg.maxfrsize);
3904
3905        /* with in range */
3906        if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3907                return BFA_STATUS_INVLD_DFSZ;
3908
3909        /* power of 2, if not the max frame size of 2112 */
3910        if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3911                return BFA_STATUS_INVLD_DFSZ;
3912
3913        fcport->cfg.maxfrsize = maxfrsize;
3914        return BFA_STATUS_OK;
3915}
3916
3917u16
3918bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3919{
3920        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3921
3922        return fcport->cfg.maxfrsize;
3923}
3924
3925u8
3926bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3927{
3928        if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
3929                return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
3930
3931        else
3932                return 0;
3933}
3934
3935void
3936bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3937{
3938        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3939
3940        fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3941}
3942
3943/*
3944 * Get port attributes.
3945 */
3946
3947wwn_t
3948bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3949{
3950        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3951        if (node)
3952                return fcport->nwwn;
3953        else
3954                return fcport->pwwn;
3955}
3956
3957void
3958bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3959{
3960        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3961
3962        memset(attr, 0, sizeof(struct bfa_port_attr_s));
3963
3964        attr->nwwn = fcport->nwwn;
3965        attr->pwwn = fcport->pwwn;
3966
3967        attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
3968        attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;
3969
3970        memcpy(&attr->pport_cfg, &fcport->cfg,
3971                sizeof(struct bfa_port_cfg_s));
3972        /* speed attributes */
3973        attr->pport_cfg.speed = fcport->cfg.speed;
3974        attr->speed_supported = fcport->speed_sup;
3975        attr->speed = fcport->speed;
3976        attr->cos_supported = FC_CLASS_3;
3977
3978        /* topology attributes */
3979        attr->pport_cfg.topology = fcport->cfg.topology;
3980        attr->topology = fcport->topology;
3981        attr->pport_cfg.trunked = fcport->cfg.trunked;
3982
3983        /* beacon attributes */
3984        attr->beacon = fcport->beacon;
3985        attr->link_e2e_beacon = fcport->link_e2e_beacon;
3986
3987        attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
3988        attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
3989        attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
3990
3991        attr->fec_state = fcport->fec_state;
3992
3993        /* PBC Disabled State */
3994        if (bfa_fcport_is_pbcdisabled(bfa))
3995                attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
3996        else {
3997                if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
3998                        attr->port_state = BFA_PORT_ST_IOCDIS;
3999                else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
4000                        attr->port_state = BFA_PORT_ST_FWMISMATCH;
4001        }
4002
4003        /* FCoE vlan */
4004        attr->fcoe_vlan = fcport->fcoe_vlan;
4005}
4006
4007#define BFA_FCPORT_STATS_TOV    1000
4008
4009/*
4010 * Fetch port statistics (FCQoS or FCoE).
4011 */
4012bfa_status_t
4013bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
4014{
4015        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4016
4017        if (!bfa_iocfc_is_operational(bfa) ||
4018            !fcport->stats_dma_ready)
4019                return BFA_STATUS_IOC_NON_OP;
4020
4021        if (!list_empty(&fcport->statsclr_pending_q))
4022                return BFA_STATUS_DEVBUSY;
4023
4024        if (list_empty(&fcport->stats_pending_q)) {
4025                list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4026                bfa_fcport_send_stats_get(fcport);
4027                bfa_timer_start(bfa, &fcport->timer,
4028                                bfa_fcport_stats_get_timeout,
4029                                fcport, BFA_FCPORT_STATS_TOV);
4030        } else
4031                list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4032
4033        return BFA_STATUS_OK;
4034}
4035
4036/*
4037 * Reset port statistics (FCQoS or FCoE).
4038 */
4039bfa_status_t
4040bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
4041{
4042        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4043
4044        if (!bfa_iocfc_is_operational(bfa) ||
4045            !fcport->stats_dma_ready)
4046                return BFA_STATUS_IOC_NON_OP;
4047
4048        if (!list_empty(&fcport->stats_pending_q))
4049                return BFA_STATUS_DEVBUSY;
4050
4051        if (list_empty(&fcport->statsclr_pending_q)) {
4052                list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4053                bfa_fcport_send_stats_clear(fcport);
4054                bfa_timer_start(bfa, &fcport->timer,
4055                                bfa_fcport_stats_clr_timeout,
4056                                fcport, BFA_FCPORT_STATS_TOV);
4057        } else
4058                list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4059
4060        return BFA_STATUS_OK;
4061}
4062
4063/*
4064 * Fetch port attributes.
4065 */
4066bfa_boolean_t
4067bfa_fcport_is_disabled(struct bfa_s *bfa)
4068{
4069        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4070
4071        return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4072                BFA_PORT_ST_DISABLED;
4073
4074}
4075
4076bfa_boolean_t
4077bfa_fcport_is_dport(struct bfa_s *bfa)
4078{
4079        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4080
4081        return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4082                BFA_PORT_ST_DPORT);
4083}
4084
4085bfa_boolean_t
4086bfa_fcport_is_ddport(struct bfa_s *bfa)
4087{
4088        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4089
4090        return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4091                BFA_PORT_ST_DDPORT);
4092}
4093
4094bfa_status_t
4095bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
4096{
4097        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4098        enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
4099
4100        bfa_trc(bfa, ioc_type);
4101
4102        if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
4103                return BFA_STATUS_QOS_BW_INVALID;
4104
4105        if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
4106                return BFA_STATUS_QOS_BW_INVALID;
4107
4108        if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
4109            (qos_bw->low > qos_bw->high))
4110                return BFA_STATUS_QOS_BW_INVALID;
4111
4112        if ((ioc_type == BFA_IOC_TYPE_FC) &&
4113            (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
4114                fcport->cfg.qos_bw = *qos_bw;
4115
4116        return BFA_STATUS_OK;
4117}
4118
4119bfa_boolean_t
4120bfa_fcport_is_ratelim(struct bfa_s *bfa)
4121{
4122        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4123
4124        return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
4125
4126}
4127
4128/*
4129 *      Enable/Disable FAA feature in port config
4130 */
4131void
4132bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
4133{
4134        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4135
4136        bfa_trc(bfa, state);
4137        fcport->cfg.faa_state = state;
4138}
4139
4140/*
4141 * Get default minimum ratelim speed
4142 */
4143enum bfa_port_speed
4144bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
4145{
4146        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4147
4148        bfa_trc(bfa, fcport->cfg.trl_def_speed);
4149        return fcport->cfg.trl_def_speed;
4150
4151}
4152
4153void
4154bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4155                  bfa_boolean_t link_e2e_beacon)
4156{
4157        struct bfa_s *bfa = dev;
4158        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4159
4160        bfa_trc(bfa, beacon);
4161        bfa_trc(bfa, link_e2e_beacon);
4162        bfa_trc(bfa, fcport->beacon);
4163        bfa_trc(bfa, fcport->link_e2e_beacon);
4164
4165        fcport->beacon = beacon;
4166        fcport->link_e2e_beacon = link_e2e_beacon;
4167}
4168
4169bfa_boolean_t
4170bfa_fcport_is_linkup(struct bfa_s *bfa)
4171{
4172        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4173
4174        return  (!fcport->cfg.trunked &&
4175                 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4176                (fcport->cfg.trunked &&
4177                 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
4178}
4179
4180bfa_boolean_t
4181bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4182{
4183        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4184
4185        return fcport->cfg.qos_enabled;
4186}
4187
4188bfa_boolean_t
4189bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
4190{
4191        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4192
4193        return fcport->cfg.trunked;
4194}
4195
/*
 * Enable or disable BB credit recovery (BBCR) in the port config.
 * Validation order matters: each failed check returns a distinct status.
 */
bfa_status_t
bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, on_off);

	/* BBCR is a native-FC-only feature */
	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
		return BFA_STATUS_BBCR_FC_ONLY;

	/* Mezzanine cards (except Chinook) do not support this command */
	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) &&
		(bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK))
		return BFA_STATUS_CMD_NOTSUPP_MEZZ;

	if (on_off) {
		/* BBCR conflicts with loop topology, QoS and trunking */
		if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
			return BFA_STATUS_TOPOLOGY_LOOP;

		if (fcport->cfg.qos_enabled)
			return BFA_STATUS_ERROR_QOS_ENABLED;

		if (fcport->cfg.trunked)
			return BFA_STATUS_TRUNK_ENABLED;

		/* Configured speed must be auto or the adapter maximum */
		if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) &&
			(fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc)))
			return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT;

		/* Adapters slower than 8G do not support BBCR at all */
		if (bfa_ioc_speed_sup(&bfa->ioc) < BFA_PORT_SPEED_8GBPS)
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;

		if (fcport->cfg.bb_cr_enabled) {
			/* Already enabled: report whether bb_scn differs */
			if (bb_scn != fcport->cfg.bb_scn)
				return BFA_STATUS_BBCR_CFG_NO_CHANGE;
			else
				return BFA_STATUS_NO_CHANGE;
		}

		/* Out-of-range bb_scn falls back to the default */
		if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX))
			bb_scn = BFA_BB_SCN_DEF;

		fcport->cfg.bb_cr_enabled = on_off;
		fcport->cfg.bb_scn = bb_scn;
	} else {
		/* Disable is a no-op when BBCR is already off */
		if (!fcport->cfg.bb_cr_enabled)
			return BFA_STATUS_NO_CHANGE;

		fcport->cfg.bb_cr_enabled = on_off;
		fcport->cfg.bb_scn = 0;
	}

	return BFA_STATUS_OK;
}
4249
4250bfa_status_t
4251bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
4252                struct bfa_bbcr_attr_s *bbcr_attr)
4253{
4254        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4255
4256        if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
4257                return BFA_STATUS_BBCR_FC_ONLY;
4258
4259        if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
4260                return BFA_STATUS_TOPOLOGY_LOOP;
4261
4262        *bbcr_attr = fcport->bbcr_attr;
4263
4264        return BFA_STATUS_OK;
4265}
4266
4267void
4268bfa_fcport_dportenable(struct bfa_s *bfa)
4269{
4270        /*
4271         * Assume caller check for port is in disable state
4272         */
4273        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
4274        bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
4275}
4276
4277void
4278bfa_fcport_dportdisable(struct bfa_s *bfa)
4279{
4280        /*
4281         * Assume caller check for port is in disable state
4282         */
4283        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
4284        bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
4285}
4286
4287void
4288bfa_fcport_ddportenable(struct bfa_s *bfa)
4289{
4290        /*
4291         * Assume caller check for port is in disable state
4292         */
4293        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
4294}
4295
4296void
4297bfa_fcport_ddportdisable(struct bfa_s *bfa)
4298{
4299        /*
4300         * Assume caller check for port is in disable state
4301         */
4302        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE);
4303}
4304
4305/*
4306 * Rport State machine functions
4307 */
4308/*
4309 * Beginning state, only online event expected.
4310 */
4311static void
4312bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
4313{
4314        bfa_trc(rp->bfa, rp->rport_tag);
4315        bfa_trc(rp->bfa, event);
4316
4317        switch (event) {
4318        case BFA_RPORT_SM_CREATE:
4319                bfa_stats(rp, sm_un_cr);
4320                bfa_sm_set_state(rp, bfa_rport_sm_created);
4321                break;
4322
4323        default:
4324                bfa_stats(rp, sm_un_unexp);
4325                bfa_sm_fault(rp->bfa, event);
4326        }
4327}
4328
/*
 * Rport is created; waiting to be brought online.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		/*
		 * Send the f/w create request; if the request queue is
		 * full, park until queue space resumes.
		 */
		bfa_stats(rp, sm_cr_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Never went online; free the rport immediately */
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC failure; park until the IOC recovers */
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4360
4361/*
4362 * Waiting for rport create response from firmware.
4363 */
4364static void
4365bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
4366{
4367        bfa_trc(rp->bfa, rp->rport_tag);
4368        bfa_trc(rp->bfa, event);
4369
4370        switch (event) {
4371        case BFA_RPORT_SM_FWRSP:
4372                bfa_stats(rp, sm_fwc_rsp);
4373                bfa_sm_set_state(rp, bfa_rport_sm_online);
4374                bfa_rport_online_cb(rp);
4375                break;
4376
4377        case BFA_RPORT_SM_DELETE:
4378                bfa_stats(rp, sm_fwc_del);
4379                bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4380                break;
4381
4382        case BFA_RPORT_SM_OFFLINE:
4383                bfa_stats(rp, sm_fwc_off);
4384                bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
4385                break;
4386
4387        case BFA_RPORT_SM_HWFAIL:
4388                bfa_stats(rp, sm_fwc_hwf);
4389                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4390                break;
4391
4392        default:
4393                bfa_stats(rp, sm_fwc_unexp);
4394                bfa_sm_fault(rp->bfa, event);
4395        }
4396}
4397
4398/*
4399 * Request queue is full, awaiting queue resume to send create request.
4400 */
4401static void
4402bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4403{
4404        bfa_trc(rp->bfa, rp->rport_tag);
4405        bfa_trc(rp->bfa, event);
4406
4407        switch (event) {
4408        case BFA_RPORT_SM_QRESUME:
4409                bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4410                bfa_rport_send_fwcreate(rp);
4411                break;
4412
4413        case BFA_RPORT_SM_DELETE:
4414                bfa_stats(rp, sm_fwc_del);
4415                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4416                bfa_reqq_wcancel(&rp->reqq_wait);
4417                bfa_rport_free(rp);
4418                break;
4419
4420        case BFA_RPORT_SM_OFFLINE:
4421                bfa_stats(rp, sm_fwc_off);
4422                bfa_sm_set_state(rp, bfa_rport_sm_offline);
4423                bfa_reqq_wcancel(&rp->reqq_wait);
4424                bfa_rport_offline_cb(rp);
4425                break;
4426
4427        case BFA_RPORT_SM_HWFAIL:
4428                bfa_stats(rp, sm_fwc_hwf);
4429                bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4430                bfa_reqq_wcancel(&rp->reqq_wait);
4431                break;
4432
4433        default:
4434                bfa_stats(rp, sm_fwc_unexp);
4435                bfa_sm_fault(rp->bfa, event);
4436        }
4437}
4438
/*
 * Online state - normal parking state.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* next state depends on whether the delete got queued to f/w */
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		/*
		 * QoS state-change notification from firmware.
		 * NOTE(review): rp->qos_attr is snapshotted before the
		 * be32_to_cpu() conversions below, so its flow id stays in
		 * firmware (big-endian) byte order — confirm readers expect
		 * that.
		 */
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* convert flow ids in place before comparing them */
		qos_scn->old_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		/* notify the driver only about attributes that changed */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4506
/*
 * Firmware rport is being deleted - awaiting f/w response.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* delete acked: rport is offline but still allocated */
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* upgrade the pending offline to a full delete */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC down: no response coming, report offline now */
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4539
/*
 * Request queue was full when the delete was attempted; waiting for
 * queue space to send the fwdelete request.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue space available: send the deferred delete now */
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* upgrade the pending offline to a full delete */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4569
/*
 * Offline state. Rport is allocated but has no firmware instance.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		/* nothing in f/w to tear down; free immediately */
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* (re)create the firmware rport instance */
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* already offline: just re-issue the callback */
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4608
/*
 * Rport is deleted, waiting for firmware response to delete.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* delete acked by f/w: release the rport */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC down: no ack coming, release the rport anyway */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4635
/*
 * Rport is deleted, but the delete request could not be queued to
 * firmware; waiting for request-queue space.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue space available: send the deferred delete now */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC down: cancel the queue wait and release the rport */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4660
/*
 * Waiting for rport create response from firmware. A delete is pending.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create acked; immediately issue the pending delete */
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC down: abandon the exchange, release the rport */
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4691
/*
 * Waiting for rport create response from firmware. Rport offline is pending.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
				 enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create acked; immediately tear the f/w instance back down */
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* pending offline is superseded by a delete */
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4727
/*
 * IOC h/w failed. Firmware state is gone; events are handled locally.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* nothing to tell f/w; just notify the driver */
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* IOC recovered; try to recreate the f/w rport */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* already disabled; ignore repeated failures */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4765
4766
4767
4768/*
4769 *  bfa_rport_private BFA rport private functions
4770 */
4771
4772static void
4773__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4774{
4775        struct bfa_rport_s *rp = cbarg;
4776
4777        if (complete)
4778                bfa_cb_rport_online(rp->rport_drv);
4779}
4780
4781static void
4782__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4783{
4784        struct bfa_rport_s *rp = cbarg;
4785
4786        if (complete)
4787                bfa_cb_rport_offline(rp->rport_drv);
4788}
4789
4790static void
4791bfa_rport_qresume(void *cbarg)
4792{
4793        struct bfa_rport_s      *rp = cbarg;
4794
4795        bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4796}
4797
4798void
4799bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4800                struct bfa_s *bfa)
4801{
4802        struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);
4803
4804        if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4805                cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4806
4807        /* kva memory */
4808        bfa_mem_kva_setup(minfo, rport_kva,
4809                cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
4810}
4811
/*
 * Attach-time initialization of the rport module: carve the rport array
 * out of pre-claimed KVA memory and build the free/active/unused queues.
 */
void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);
	INIT_LIST_HEAD(&mod->rp_unused_q);

	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* num_rports must be non-zero and a power of two */
	WARN_ON(!mod->num_rports ||
		   (mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * rport tag 0 is kept off the free list (never allocated);
		 * all others are available for bfa_rport_alloc().
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory
	 */
	bfa_mem_kva_curp(mod) = (u8 *) rp;
}
4851
4852void
4853bfa_rport_iocdisable(struct bfa_s *bfa)
4854{
4855        struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4856        struct bfa_rport_s *rport;
4857        struct list_head *qe, *qen;
4858
4859        /* Enqueue unused rport resources to free_q */
4860        list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
4861
4862        list_for_each_safe(qe, qen, &mod->rp_active_q) {
4863                rport = (struct bfa_rport_s *) qe;
4864                bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4865        }
4866}
4867
4868static struct bfa_rport_s *
4869bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4870{
4871        struct bfa_rport_s *rport;
4872
4873        bfa_q_deq(&mod->rp_free_q, &rport);
4874        if (rport)
4875                list_add_tail(&rport->qe, &mod->rp_active_q);
4876
4877        return rport;
4878}
4879
4880static void
4881bfa_rport_free(struct bfa_rport_s *rport)
4882{
4883        struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4884
4885        WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
4886        list_del(&rport->qe);
4887        list_add_tail(&rport->qe, &mod->rp_free_q);
4888}
4889
/*
 * Build and queue a BFI_RPORT_H2I_CREATE_REQ to firmware.
 * Returns BFA_FALSE (and registers a queue-resume wait) when the
 * request queue is full; BFA_TRUE when the message was queued.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		/* no space: ask to be called back via bfa_rport_qresume() */
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	/* populate the create request from the cached rport_info */
	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4922
/*
 * Build and queue a BFI_RPORT_H2I_DELETE_REQ to firmware.
 * Returns BFA_FALSE (and registers a queue-resume wait) when the
 * request queue is full; BFA_TRUE when the message was queued.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_fn_lpu(rp->bfa));
	/* fw_handle was returned by firmware in the create response */
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4947
/*
 * Build and queue a BFI_RPORT_H2I_SET_SPEED_REQ to firmware.
 * Best-effort: unlike create/delete, a full request queue drops the
 * request (no reqq wait is registered) and returns BFA_FALSE.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		/* queue full: trace and drop (speed update is not retried) */
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4973
4974
4975
4976/*
4977 *  bfa_rport_public
4978 */
4979
/*
 * Rport interrupt processing: dispatch firmware-to-host rport messages
 * to the owning rport's state machine or to driver callbacks.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		/* record f/w handle + QoS attrs, then advance the SM */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_rport_set_lunmask(bfa, rp);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_rport_unset_lunmask(bfa, rp);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		/* hand the raw f/w message to the SM via event_arg */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	case BFI_RPORT_I2H_LIP_SCN_ONLINE:
		bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
				&msg.lip_scn->loop_info);
		bfa_cb_rport_scn_online(bfa);
		break;

	case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
		bfa_cb_rport_scn_offline(bfa);
		break;

	case BFI_RPORT_I2H_NO_DEV:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
		bfa_cb_rport_scn_no_dev(rp->rport_drv);
		break;

	default:
		/* unknown message id from firmware */
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
5036
5037void
5038bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
5039{
5040        struct bfa_rport_mod_s  *mod = BFA_RPORT_MOD(bfa);
5041        struct list_head        *qe;
5042        int     i;
5043
5044        for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
5045                bfa_q_deq_tail(&mod->rp_free_q, &qe);
5046                list_add_tail(qe, &mod->rp_unused_q);
5047        }
5048}
5049
5050/*
5051 *  bfa_rport_api
5052 */
5053
5054struct bfa_rport_s *
5055bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
5056{
5057        struct bfa_rport_s *rp;
5058
5059        rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
5060
5061        if (rp == NULL)
5062                return NULL;
5063
5064        rp->bfa = bfa;
5065        rp->rport_drv = rport_drv;
5066        memset(&rp->stats, 0, sizeof(rp->stats));
5067
5068        WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
5069        bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
5070
5071        return rp;
5072}
5073
/*
 * Bring an rport online with the attributes learned from PLOGI.
 * Warns on a zero max_frmsz but still recovers with a sane default.
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	WARN_ON(rport_info->max_frmsz == 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	/* cache the info; the SM uses it to build the f/w create request */
	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
5091
5092void
5093bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
5094{
5095        WARN_ON(speed == 0);
5096        WARN_ON(speed == BFA_PORT_SPEED_AUTO);
5097
5098        if (rport) {
5099                rport->rport_info.speed = speed;
5100                bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
5101        }
5102}
5103
/*
 * Set Rport LUN Mask: enable LUN masking on both the logical port and
 * the rport, then propagate the (lp_wwn, rp_wwn) -> (rport_tag, lp_tag)
 * binding into the FCP-IM lun-mask table.
 */
void
bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;
	u8 lp_tag = (u8)rp->rport_info.lp_tag;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* single assignment marks both the lps and the rport as masked */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
					rp->lun_mask = BFA_TRUE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
}
5119
/*
 * Unset Rport LUN mask: clear the mask flags on the logical port and
 * rport and invalidate the entry in the FCP-IM lun-mask table.
 */
void
bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
				rp->lun_mask = BFA_FALSE;
	/* invalid tags tell fcpim to drop the binding for this wwn pair */
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
			BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
}
5135
5136/*
5137 * SGPG related functions
5138 */
5139
/*
 * Compute and return memory needed by the SGPG module: DMA memory for
 * the scatter-gather pages themselves (split across segments) plus KVA
 * for the per-page bookkeeping structures.
 */
void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
	struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_sgpg, num_sgpg;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);

	/* clamp the configured count into [BFA_SGPG_MIN, BFA_SGPG_MAX] */
	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;

	num_sgpg = cfg->drvcfg.num_sgpgs;

	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
	per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);

	/* full-size segments first; the last segment holds the remainder */
	bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
		if (num_sgpg >= per_seg_sgpg) {
			num_sgpg -= per_seg_sgpg;
			bfa_mem_dma_setup(minfo, seg_ptr,
					per_seg_sgpg * sgpg_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
					num_sgpg * sgpg_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, sgpg_kva,
		cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
}
5177
/*
 * Attach-time initialization of the SGPG module: walk the claimed DMA
 * segments, align each segment's base, and pair every aligned DMA page
 * (bfi_sgpg_s) with a host-side descriptor (bfa_sgpg_s) on the free list.
 */
void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;
	struct bfa_mem_dma_s *seg_ptr;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
	u16	i, idx, nsegs, per_seg_sgpg, num_sgpg;

	/* lets a physical address be viewed as a bfi address pair */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;

	num_sgpg = cfg->drvcfg.num_sgpgs;
	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);

	/* dma/kva mem claim */
	hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);

	bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {

		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		/* bytes lost to rounding the segment base up to alignment */
		align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
					     bfa_mem_dma_phys(seg_ptr);

		sgpg = (struct bfi_sgpg_s *)
			(((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
		sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
		WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));

		/* pages that fit in this segment after alignment */
		per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;

		for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
			memset(hsgpg, 0, sizeof(*hsgpg));
			memset(sgpg, 0, sizeof(*sgpg));

			hsgpg->sgpg = sgpg;
			/* store the PA in the byte order the hardware wants */
			sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
			hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
			list_add_tail(&hsgpg->qe, &mod->sgpg_q);

			sgpg++;
			hsgpg++;
			sgpg_pa.pa += sgpg_sz;
		}
	}

	/* advance the kva cursor past the descriptors we consumed */
	bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
}
5240
5241bfa_status_t
5242bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
5243{
5244        struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5245        struct bfa_sgpg_s *hsgpg;
5246        int i;
5247
5248        if (mod->free_sgpgs < nsgpgs)
5249                return BFA_STATUS_ENOMEM;
5250
5251        for (i = 0; i < nsgpgs; i++) {
5252                bfa_q_deq(&mod->sgpg_q, &hsgpg);
5253                WARN_ON(!hsgpg);
5254                list_add_tail(&hsgpg->qe, sgpg_q);
5255        }
5256
5257        mod->free_sgpgs -= nsgpgs;
5258        return BFA_STATUS_OK;
5259}
5260
/*
 * Return nsgpg pages from sgpg_q to the free pool, then hand freed
 * pages out to waiters (FIFO), completing any waiter that becomes
 * fully satisfied.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	mod->free_sgpgs += nsgpg;
	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* give the head waiter everything it needs, or all we have */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* waiter fully satisfied: dequeue and complete it */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
5292
/*
 * Queue a wait element for nsgpg pages. Any currently-free pages are
 * granted immediately; the element then waits on sgpg_wait_q for the
 * remainder and is completed from bfa_sgpg_mfree().
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(nsgpg <= 0);
	/* caller must only wait when the pool cannot satisfy the request */
	WARN_ON(nsgpg <= mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		WARN_ON(!list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
5318
5319void
5320bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
5321{
5322        struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5323
5324        WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
5325        list_del(&wqe->qe);
5326
5327        if (wqe->nsgpg_total != wqe->nsgpg)
5328                bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
5329                                   wqe->nsgpg_total - wqe->nsgpg);
5330}
5331
5332void
5333bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5334                   void *cbarg)
5335{
5336        INIT_LIST_HEAD(&wqe->sgpg_q);
5337        wqe->cbfn = cbfn;
5338        wqe->cbarg = cbarg;
5339}
5340
5341/*
5342 *  UF related functions
5343 */
5344/*
5345 *****************************************************************************
5346 * Internal functions
5347 *****************************************************************************
5348 */
5349static void
5350__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5351{
5352        struct bfa_uf_s   *uf = cbarg;
5353        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5354
5355        if (complete)
5356                ufm->ufrecv(ufm->cbarg, uf);
5357}
5358
5359static void
5360claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
5361{
5362        struct bfi_uf_buf_post_s *uf_bp_msg;
5363        u16 i;
5364        u16 buf_len;
5365
5366        ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
5367        uf_bp_msg = ufm->uf_buf_posts;
5368
5369        for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5370             i++, uf_bp_msg++) {
5371                memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5372
5373                uf_bp_msg->buf_tag = i;
5374                buf_len = sizeof(struct bfa_uf_buf_s);
5375                uf_bp_msg->buf_len = cpu_to_be16(buf_len);
5376                bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5377                            bfa_fn_lpu(ufm->bfa));
5378                bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
5379        }
5380
5381        /*
5382         * advance pointer beyond consumed memory
5383         */
5384        bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
5385}
5386
5387static void
5388claim_ufs(struct bfa_uf_mod_s *ufm)
5389{
5390        u16 i;
5391        struct bfa_uf_s   *uf;
5392
5393        /*
5394         * Claim block of memory for UF list
5395         */
5396        ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);
5397
5398        /*
5399         * Initialize UFs and queue it in UF free queue
5400         */
5401        for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
5402                memset(uf, 0, sizeof(struct bfa_uf_s));
5403                uf->bfa = ufm->bfa;
5404                uf->uf_tag = i;
5405                uf->pb_len = BFA_PER_UF_DMA_SZ;
5406                uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
5407                uf->buf_pa = ufm_pbs_pa(ufm, i);
5408                list_add_tail(&uf->qe, &ufm->uf_free_q);
5409        }
5410
5411        /*
5412         * advance memory pointer
5413         */
5414        bfa_mem_kva_curp(ufm) = (u8 *) uf;
5415}
5416
/*
 * Claim all kernel-virtual memory used by the UF module: the UF
 * descriptor array first, then the pre-built buffer-post messages
 * (both are carved sequentially from the same KVA block, so the
 * order of these two calls matters).
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
	claim_ufs(ufm);
	claim_uf_post_msgs(ufm);
}
5423
5424void
5425bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
5426                struct bfa_s *bfa)
5427{
5428        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5429        struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
5430        u32     num_ufs = cfg->fwcfg.num_uf_bufs;
5431        struct bfa_mem_dma_s *seg_ptr;
5432        u16     nsegs, idx, per_seg_uf = 0;
5433
5434        nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
5435        per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);
5436
5437        bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
5438                if (num_ufs >= per_seg_uf) {
5439                        num_ufs -= per_seg_uf;
5440                        bfa_mem_dma_setup(minfo, seg_ptr,
5441                                per_seg_uf * BFA_PER_UF_DMA_SZ);
5442                } else
5443                        bfa_mem_dma_setup(minfo, seg_ptr,
5444                                num_ufs * BFA_PER_UF_DMA_SZ);
5445        }
5446
5447        /* kva memory */
5448        bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
5449                (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
5450}
5451
5452void
5453bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5454                struct bfa_pcidev_s *pcidev)
5455{
5456        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5457
5458        ufm->bfa = bfa;
5459        ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
5460        INIT_LIST_HEAD(&ufm->uf_free_q);
5461        INIT_LIST_HEAD(&ufm->uf_posted_q);
5462        INIT_LIST_HEAD(&ufm->uf_unused_q);
5463
5464        uf_mem_claim(ufm);
5465}
5466
/*
 * Dequeue a UF descriptor from the free list; returns NULL when the
 * free list is empty.
 */
static struct bfa_uf_s *
bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
{
	struct bfa_uf_s   *uf;

	bfa_q_deq(&uf_mod->uf_free_q, &uf);
	return uf;
}
5475
/* Return a UF descriptor to the free list. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
5481
/*
 * Post a single UF receive buffer to the firmware by copying its
 * pre-built buffer-post message (see claim_uf_post_msgs()) into the
 * FCXP request queue.
 *
 * Returns BFA_STATUS_FAILED when no request-queue element is
 * available; in that case the UF is NOT placed on the posted queue
 * and the caller must decide what to do with it.
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		      sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);

	bfa_trc(ufm->bfa, uf->uf_tag);

	/* firmware now owns the buffer until a FRM_RCVD event */
	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
5500
5501static void
5502bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5503{
5504        struct bfa_uf_s   *uf;
5505
5506        while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5507                if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5508                        break;
5509        }
5510}
5511
5512static void
5513uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5514{
5515        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5516        u16 uf_tag = m->buf_tag;
5517        struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
5518        struct bfa_uf_buf_s *uf_buf;
5519        uint8_t *buf;
5520        struct fchs_s *fchs;
5521
5522        uf_buf = (struct bfa_uf_buf_s *)
5523                        bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
5524        buf = &uf_buf->d[0];
5525
5526        m->frm_len = be16_to_cpu(m->frm_len);
5527        m->xfr_len = be16_to_cpu(m->xfr_len);
5528
5529        fchs = (struct fchs_s *)uf_buf;
5530
5531        list_del(&uf->qe);      /* dequeue from posted queue */
5532
5533        uf->data_ptr = buf;
5534        uf->data_len = m->xfr_len;
5535
5536        WARN_ON(uf->data_len < sizeof(struct fchs_s));
5537
5538        if (uf->data_len == sizeof(struct fchs_s)) {
5539                bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5540                               uf->data_len, (struct fchs_s *)buf);
5541        } else {
5542                u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5543                bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5544                                      BFA_PL_EID_RX, uf->data_len,
5545                                      (struct fchs_s *)buf, pld_w0);
5546        }
5547
5548        if (bfa->fcs)
5549                __bfa_cb_uf_recv(uf, BFA_TRUE);
5550        else
5551                bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5552}
5553
/*
 * IOC failure handling: move unused UF descriptors back to the free
 * list, then reclaim every posted UF as well — the firmware will
 * never complete them once the IOC is down.
 */
void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_uf_s *uf;
	struct list_head *qe, *qen;

	/* Enqueue unused uf resources to free_q */
	list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);

	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
		uf = (struct bfa_uf_s *) qe;
		list_del(&uf->qe);
		bfa_uf_put(ufm, uf);
	}
}
5570
/*
 * Start the UF module: hand all free receive buffers to the firmware.
 */
void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5576
5577/*
 * Register a handler for all unsolicited receive frames.
5579 *
5580 * @param[in]   bfa             BFA instance
5581 * @param[in]   ufrecv  receive handler function
5582 * @param[in]   cbarg   receive handler arg
5583 */
5584void
5585bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5586{
5587        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5588
5589        ufm->ufrecv = ufrecv;
5590        ufm->cbarg = cbarg;
5591}
5592
5593/*
5594 *      Free an unsolicited frame back to BFA.
5595 *
5596 * @param[in]           uf              unsolicited frame to be freed
5597 *
5598 * @return None
5599 */
5600void
5601bfa_uf_free(struct bfa_uf_s *uf)
5602{
5603        bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5604        bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5605}
5606
5607
5608
5609/*
5610 *  uf_pub BFA uf module public functions
5611 */
/*
 * UF message-class interrupt handler: dispatch firmware-to-host UF
 * messages. Only frame-received events are expected here.
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		/* unexpected message id from firmware */
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
5627
/*
 * Firmware reconfiguration reduced the number of UFs to @num_uf_fw:
 * park the excess descriptors on the unused queue so they are never
 * posted (they return to the free list on IOC disable).
 */
void
bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
{
	struct bfa_uf_mod_s	*mod = BFA_UF_MOD(bfa);
	struct list_head	*qe;
	int	i;

	for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
		bfa_q_deq_tail(&mod->uf_free_q, &qe);
		list_add_tail(qe, &mod->uf_unused_q);
	}
}
5640
5641/*
5642 *      Dport forward declaration
5643 */
5644
/* D-port diagnostic test states, reported via dport show. */
enum bfa_dport_test_state_e {
	BFA_DPORT_ST_DISABLED	= 0,	/*!< dport is disabled */
	BFA_DPORT_ST_INP	= 1,	/*!< test in progress */
	BFA_DPORT_ST_COMP	= 2,	/*!< test complete successfully */
	BFA_DPORT_ST_NO_SFP	= 3,	/*!< sfp is not present */
	BFA_DPORT_ST_NOTSTART	= 4,	/*!< test not started; dport is enabled */
};
5652
5653/*
5654 * BFA DPORT state machine events
5655 */
enum bfa_dport_sm_event {
	BFA_DPORT_SM_ENABLE	= 1,	/* dport enable event          */
	BFA_DPORT_SM_DISABLE	= 2,	/* dport disable event         */
	BFA_DPORT_SM_FWRSP	= 3,	/* fw enable/disable rsp       */
	BFA_DPORT_SM_QRESUME	= 4,	/* CQ space available          */
	BFA_DPORT_SM_HWFAIL	= 5,	/* IOC h/w failure             */
	BFA_DPORT_SM_START	= 6,	/* re-start dport test         */
	BFA_DPORT_SM_REQFAIL	= 7,	/* request failure             */
	BFA_DPORT_SM_SCN	= 8,	/* state change notify from fw */
};
5666
5667static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
5668                                  enum bfa_dport_sm_event event);
5669static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
5670                                  enum bfa_dport_sm_event event);
5671static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
5672                                  enum bfa_dport_sm_event event);
5673static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
5674                                 enum bfa_dport_sm_event event);
5675static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
5676                                 enum bfa_dport_sm_event event);
5677static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
5678                                   enum bfa_dport_sm_event event);
5679static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
5680                                        enum bfa_dport_sm_event event);
5681static void bfa_dport_sm_starting(struct bfa_dport_s *dport,
5682                                  enum bfa_dport_sm_event event);
5683static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
5684                                   enum bfa_dport_sm_event event);
5685static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
5686                                   enum bfa_dport_sm_event event);
5687static void bfa_dport_qresume(void *cbarg);
5688static void bfa_dport_req_comp(struct bfa_dport_s *dport,
5689                                struct bfi_diag_dport_rsp_s *msg);
5690static void bfa_dport_scn(struct bfa_dport_s *dport,
5691                                struct bfi_diag_dport_scn_s *msg);
5692
5693/*
5694 *      BFA fcdiag module
5695 */
5696#define BFA_DIAG_QTEST_TOV      1000    /* msec */
5697
5698/*
5699 *      Set port status to busy
5700 */
5701static void
5702bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5703{
5704        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5705
5706        if (fcdiag->lb.lock)
5707                fcport->diag_busy = BFA_TRUE;
5708        else
5709                fcport->diag_busy = BFA_FALSE;
5710}
5711
/*
 * Attach-time initialization of the FC diag module and its embedded
 * d-port state machine (which starts in the disabled state).
 */
void
bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s  *dport = &fcdiag->dport;

	fcdiag->bfa             = bfa;
	fcdiag->trcmod	= bfa->trcmod;
	/* The common DIAG attach bfa_diag_attach() will do all memory claim */
	dport->bfa = bfa;
	bfa_sm_set_state(dport, bfa_dport_sm_disabled);
	bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
	dport->cbfn = NULL;
	dport->cbarg = NULL;
	dport->test_state = BFA_DPORT_ST_DISABLED;
	memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s));
}
5730
/*
 * IOC failure: fail any loopback test in progress (completing its
 * callback with BFA_STATUS_IOC_FAILURE) and drive the d-port state
 * machine through its hardware-failure handling.
 */
void
bfa_fcdiag_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	bfa_trc(fcdiag, fcdiag->lb.lock);
	if (fcdiag->lb.lock) {
		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
		fcdiag->lb.lock = 0;
		bfa_fcdiag_set_busy_status(fcdiag);
	}

	bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
}
5747
/*
 * Queue-test timer expired before the firmware echoed the test
 * message: report BFA_STATUS_ETIMER through the completion callback
 * and release the queue-test lock.
 */
static void
bfa_fcdiag_queuetest_timeout(void *cbarg)
{
	struct bfa_fcdiag_s       *fcdiag = cbarg;
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;

	bfa_trc(fcdiag, fcdiag->qtest.all);
	bfa_trc(fcdiag, fcdiag->qtest.count);

	fcdiag->qtest.timer_active = 0;

	res->status = BFA_STATUS_ETIMER;
	/* number of queues completed before the timeout */
	res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	if (fcdiag->qtest.all)
		res->queue  = fcdiag->qtest.all;

	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
	fcdiag->qtest.status = BFA_STATUS_ETIMER;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
5769
5770static bfa_status_t
5771bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
5772{
5773        u32     i;
5774        struct bfi_diag_qtest_req_s *req;
5775
5776        req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
5777        if (!req)
5778                return BFA_STATUS_DEVBUSY;
5779
5780        /* build host command */
5781        bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
5782                bfa_fn_lpu(fcdiag->bfa));
5783
5784        for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
5785                req->data[i] = QTEST_PAT_DEFAULT;
5786
5787        bfa_trc(fcdiag, fcdiag->qtest.queue);
5788        /* ring door bell */
5789        bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
5790        return BFA_STATUS_OK;
5791}
5792
/*
 * Queue-test response from firmware. Verify the echoed payload
 * (firmware returns the bitwise complement of the pattern), then
 * either resend on the same queue, advance to the next queue (when
 * testing all queues), or finish the test and invoke the completion
 * callback.
 */
static void
bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
			bfi_diag_qtest_rsp_t *rsp)
{
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
	bfa_status_t status = BFA_STATUS_OK;
	int i;

	/* Check timer, should still be active   */
	if (!fcdiag->qtest.timer_active) {
		/* timed out already; bfa_fcdiag_queuetest_timeout() reported */
		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
		return;
	}

	/* update count */
	fcdiag->qtest.count--;

	/* Check result: fw echoes the complemented pattern */
	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
			res->status = BFA_STATUS_DATACORRUPTED;
			break;
		}
	}

	if (res->status == BFA_STATUS_OK) {
		if (fcdiag->qtest.count > 0) {
			/* more iterations on the current queue */
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		} else if (fcdiag->qtest.all > 0 &&
			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
			/* current queue done; advance to the next queue */
			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
			fcdiag->qtest.queue++;
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		}
	}

	/* Stop timer when we comp all queue */
	if (fcdiag->qtest.timer_active) {
		bfa_timer_stop(&fcdiag->qtest.timer);
		fcdiag->qtest.timer_active = 0;
	}
	res->queue = fcdiag->qtest.queue;
	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	bfa_trc(fcdiag, res->count);
	bfa_trc(fcdiag, res->status);
	fcdiag->qtest.status = res->status;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
5850
/*
 * Loopback-test completion from firmware: byte-swap the frame
 * counters into the caller's result structure, invoke the completion
 * callback and drop the loopback lock / diag-busy status.
 */
static void
bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
			struct bfi_diag_lb_rsp_s *rsp)
{
	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;

	res->numtxmfrm	= be32_to_cpu(rsp->res.numtxmfrm);
	res->numosffrm	= be32_to_cpu(rsp->res.numosffrm);
	res->numrcvfrm	= be32_to_cpu(rsp->res.numrcvfrm);
	res->badfrminf	= be32_to_cpu(rsp->res.badfrminf);
	res->badfrmnum	= be32_to_cpu(rsp->res.badfrmnum);
	res->status	= rsp->res.status;
	fcdiag->lb.status = rsp->res.status;
	bfa_trc(fcdiag, fcdiag->lb.status);
	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
	fcdiag->lb.lock = 0;
	bfa_fcdiag_set_busy_status(fcdiag);
}
5869
/*
 * Build and post a loopback-test request to the DIAG request queue.
 *
 * Returns BFA_STATUS_DEVBUSY when no request-queue element is
 * available.
 */
static bfa_status_t
bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
			struct bfa_diag_loopback_s *loopback)
{
	struct bfi_diag_lb_req_s *lb_req;

	lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
	if (!lb_req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
		bfa_fn_lpu(fcdiag->bfa));

	lb_req->lb_mode = loopback->lb_mode;
	lb_req->speed = loopback->speed;
	lb_req->loopcnt = loopback->loopcnt;
	lb_req->pattern = loopback->pattern;

	/* ring door bell */
	bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);

	bfa_trc(fcdiag, loopback->lb_mode);
	bfa_trc(fcdiag, loopback->speed);
	bfa_trc(fcdiag, loopback->loopcnt);
	bfa_trc(fcdiag, loopback->pattern);
	return BFA_STATUS_OK;
}
5898
5899/*
5900 *      cpe/rme intr handler
5901 */
/*
 * DIAG message-class interrupt handler: dispatch loopback,
 * queue-test and d-port responses/notifications from firmware.
 */
void
bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	switch (msg->mhdr.msg_id) {
	case BFI_DIAG_I2H_LOOPBACK:
		bfa_fcdiag_loopback_comp(fcdiag,
				(struct bfi_diag_lb_rsp_s *) msg);
		break;
	case BFI_DIAG_I2H_QTEST:
		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
		break;
	case BFI_DIAG_I2H_DPORT:
		bfa_dport_req_comp(&fcdiag->dport,
				(struct bfi_diag_dport_rsp_s *)msg);
		break;
	case BFI_DIAG_I2H_DPORT_SCN:
		bfa_dport_scn(&fcdiag->dport,
				(struct bfi_diag_dport_scn_s *)msg);
		break;
	default:
		/* unexpected message id from firmware */
		bfa_trc(fcdiag, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
5928
5929/*
5930 *      Loopback test
5931 *
5932 *   @param[in] *bfa            - bfa data struct
5933 *   @param[in] opmode          - port operation mode
5934 *   @param[in] speed           - port speed
5935 *   @param[in] lpcnt           - loop count
5936 *   @param[in] pat                     - pattern to build packet
5937 *   @param[in] *result         - pt to bfa_diag_loopback_result_t data struct
5938 *   @param[in] cbfn            - callback function
 *   @param[in] cbarg           - callback function arg
5940 *
5941 *   @param[out]
5942 */
bfa_status_t
bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
		enum bfa_port_speed speed, u32 lpcnt, u32 pat,
		struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	struct  bfa_diag_loopback_s loopback;
	struct bfa_port_attr_s attr;
	bfa_status_t status;
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* if port is PBC disabled, return error */
	if (bfa_fcport_is_pbcdisabled(bfa)) {
		bfa_trc(fcdiag, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	/* loopback is destructive: the port must be disabled first */
	if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
		bfa_trc(fcdiag, opmode);
		return BFA_STATUS_PORT_NOT_DISABLED;
	}

	/*
	 * Check if input speed is supported by the port mode
	 */
	if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
		if (!(speed == BFA_PORT_SPEED_1GBPS ||
		      speed == BFA_PORT_SPEED_2GBPS ||
		      speed == BFA_PORT_SPEED_4GBPS ||
		      speed == BFA_PORT_SPEED_8GBPS ||
		      speed == BFA_PORT_SPEED_16GBPS ||
		      speed == BFA_PORT_SPEED_AUTO)) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
		bfa_fcport_get_attr(bfa, &attr);
		bfa_trc(fcdiag, attr.speed_supported);
		if (speed > attr.speed_supported)
			return BFA_STATUS_UNSUPP_SPEED;
	} else {
		/* non-FC (FCoE) IOC only supports 10G */
		if (speed != BFA_PORT_SPEED_10GBPS) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
	}

	/*
	 * For CT2, 1G is not supported
	 */
	if ((speed == BFA_PORT_SPEED_1GBPS) &&
	    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
		bfa_trc(fcdiag, speed);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	/* For Mezz card, port speed entered needs to be checked */
	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
		if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
			if (!(speed == BFA_PORT_SPEED_1GBPS ||
			      speed == BFA_PORT_SPEED_2GBPS ||
			      speed == BFA_PORT_SPEED_4GBPS ||
			      speed == BFA_PORT_SPEED_8GBPS ||
			      speed == BFA_PORT_SPEED_16GBPS ||
			      speed == BFA_PORT_SPEED_AUTO))
				return BFA_STATUS_UNSUPP_SPEED;
		} else {
			if (speed != BFA_PORT_SPEED_10GBPS)
				return BFA_STATUS_UNSUPP_SPEED;
		}
	}
	/* check to see if fcport is dport */
	if (bfa_fcport_is_dport(bfa)) {
		bfa_trc(fcdiag, fcdiag->lb.lock);
		return BFA_STATUS_DPORT_ENABLED;
	}
	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->lb.lock) {
		bfa_trc(fcdiag, fcdiag->lb.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* all checks passed: take the lock and kick off the test */
	fcdiag->lb.lock = 1;
	loopback.lb_mode = opmode;
	loopback.speed = speed;
	loopback.loopcnt = lpcnt;
	loopback.pattern = pat;
	fcdiag->lb.result = result;
	fcdiag->lb.cbfn = cbfn;
	fcdiag->lb.cbarg = cbarg;
	memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
	bfa_fcdiag_set_busy_status(fcdiag);

	/* Send msg to fw */
	status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
	return status;
}
6042
6043/*
6044 *      DIAG queue test command
6045 *
6046 *   @param[in] *bfa            - bfa data struct
6047 *   @param[in] force           - 1: don't do ioc op checking
6048 *   @param[in] queue           - queue no. to test
6049 *   @param[in] *result         - pt to bfa_diag_qtest_result_t data struct
6050 *   @param[in] cbfn            - callback function
 *   @param[in] *cbarg          - callback function arg
6052 *
6053 *   @param[out]
6054 */
bfa_status_t
bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
		struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	bfa_status_t status;
	bfa_trc(fcdiag, force);
	bfa_trc(fcdiag, queue);

	if (!force && !bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->qtest.lock) {
		bfa_trc(fcdiag, fcdiag->qtest.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* Initialization */
	fcdiag->qtest.lock = 1;
	fcdiag->qtest.cbfn = cbfn;
	fcdiag->qtest.cbarg = cbarg;
	fcdiag->qtest.result = result;
	fcdiag->qtest.count = QTEST_CNT_DEFAULT;

	/* Init test results */
	fcdiag->qtest.result->status = BFA_STATUS_OK;
	fcdiag->qtest.result->count  = 0;

	/* send: queue >= BFI_IOC_MAX_CQS means "test all queues from 0" */
	if (queue < BFI_IOC_MAX_CQS) {
		fcdiag->qtest.result->queue  = (u8)queue;
		fcdiag->qtest.queue = (u8)queue;
		fcdiag->qtest.all   = 0;
	} else {
		fcdiag->qtest.result->queue  = 0;
		fcdiag->qtest.queue = 0;
		fcdiag->qtest.all   = 1;
	}
	status = bfa_fcdiag_queuetest_send(fcdiag);

	/* Start a timer; bfa_fcdiag_queuetest_timeout() fires on expiry */
	if (status == BFA_STATUS_OK) {
		bfa_timer_start(bfa, &fcdiag->qtest.timer,
				bfa_fcdiag_queuetest_timeout, fcdiag,
				BFA_DIAG_QTEST_TOV);
		fcdiag->qtest.timer_active = 1;
	}
	return status;
}
6106
6107/*
6108 * DIAG PLB is running
6109 *
6110 *   @param[in] *bfa    - bfa data struct
6111 *
6112 *   @param[out]
6113 */
6114bfa_status_t
6115bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
6116{
6117        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6118        return fcdiag->lb.lock ?  BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
6119}
6120
6121/*
6122 *      D-port
6123 */
/*
 * Snapshot the starting parameters of a d-port test into the result
 * structure and mark the test in progress.
 */
#define bfa_dport_result_start(__dport, __mode) do {				\
		(__dport)->result.start_time = ktime_get_real_seconds();	\
		(__dport)->result.status = DPORT_TEST_ST_INPRG;			\
		(__dport)->result.mode = (__mode);				\
		(__dport)->result.rp_pwwn = (__dport)->rp_pwwn;			\
		(__dport)->result.rp_nwwn = (__dport)->rp_nwwn;			\
		(__dport)->result.lpcnt = (__dport)->lpcnt;			\
} while (0)
6132
6133static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
6134                                        enum bfi_dport_req req);
6135static void
6136bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
6137{
6138        if (dport->cbfn != NULL) {
6139                dport->cbfn(dport->cbarg, bfa_status);
6140                dport->cbfn = NULL;
6141                dport->cbarg = NULL;
6142        }
6143}
6144
/*
 * Disabled state: d-port is off. An enable request sends the enable
 * command to firmware (or waits for CQ space); a dynamic d-port
 * enable SCN from firmware switches the port straight into the
 * enabled state.
 */
static void
bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_ENABLE:
		bfa_fcport_dportenable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		/* Already disabled */
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* ignore */
		break;

	case BFA_DPORT_SM_SCN:
		if (dport->i2hmsg.scn.state ==  BFI_DPORT_SCN_DDPORT_ENABLE) {
			bfa_fcport_ddportenable(dport->bfa);
			dport->dynamic = BFA_TRUE;
			dport->test_state = BFA_DPORT_ST_NOTSTART;
			bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		} else {
			/* no other SCN is expected while disabled */
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			WARN_ON(1);
		}
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6183
/*
 * Waiting for request-queue space to send the d-port enable command
 * to firmware.
 */
static void
bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
			    enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6206
/*
 * Enable command sent to firmware; waiting for its response.
 */
static void
bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			/* enable succeeded: a test starts right away */
			dport->test_state = BFA_DPORT_ST_INP;
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO);
		}
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_REQFAIL:
		dport->test_state = BFA_DPORT_ST_DISABLED;
		bfa_fcport_dportdisable(dport->bfa);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6240
/*
 * D-port is enabled: a test may be (re)started, the port may be
 * disabled, and firmware state-change notifications (SCN) update the
 * cached test state.
 */
static void
bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_START:
		/* Restart the diagnostic test; qwait if no CQ space */
		if (bfa_dport_send_req(dport, BFI_DPORT_START))
			bfa_sm_set_state(dport, bfa_dport_sm_starting);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		bfa_fcport_dportdisable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_disabling);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_SCN:
		/* i2hmsg.scn.state was latched by bfa_dport_scn() */
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_TESTCOMP:
			dport->test_state = BFA_DPORT_ST_COMP;
			break;

		case BFI_DPORT_SCN_TESTSTART:
			dport->test_state = BFA_DPORT_ST_INP;
			break;

		case BFI_DPORT_SCN_TESTSKIP:
		case BFI_DPORT_SCN_SUBTESTSTART:
			/* no state change */
			break;

		case BFI_DPORT_SCN_SFP_REMOVED:
			dport->test_state = BFA_DPORT_ST_NO_SFP;
			break;

		case BFI_DPORT_SCN_DDPORT_DISABLE:
			/* Switch requested exit from dynamic d-port mode */
			bfa_fcport_ddportdisable(dport->bfa);

			if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE))
				bfa_sm_set_state(dport,
					 bfa_dport_sm_dynamic_disabling);
			else
				bfa_sm_set_state(dport,
					 bfa_dport_sm_dynamic_disabling_qwait);
			break;

		case BFI_DPORT_SCN_FCPORT_DISABLE:
			/* fcport was disabled underneath the d-port */
			bfa_fcport_ddportdisable(dport->bfa);

			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			break;

		default:
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);
		}
		break;
	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6312
/*
 * Waiting for request-queue space to send the d-port DISABLE command.
 */
static void
bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
			     enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		/* CQ space available: send the deferred DISABLE */
		bfa_sm_set_state(dport, bfa_dport_sm_disabling);
		bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* IOC failure implicitly disables; complete with OK */
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	case BFA_DPORT_SM_SCN:
		/* ignore */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6339
6340static void
6341bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6342{
6343        bfa_trc(dport->bfa, event);
6344
6345        switch (event) {
6346        case BFA_DPORT_SM_FWRSP:
6347                dport->test_state = BFA_DPORT_ST_DISABLED;
6348                bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6349                break;
6350
6351        case BFA_DPORT_SM_HWFAIL:
6352                bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6353                bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6354                break;
6355
6356        case BFA_DPORT_SM_SCN:
6357                /* no state change */
6358                break;
6359
6360        default:
6361                bfa_sm_fault(dport->bfa, event);
6362        }
6363}
6364
/*
 * Waiting for request-queue space to send the d-port START command.
 */
static void
bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
			    enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		/* CQ space available: send the deferred START */
		bfa_sm_set_state(dport, bfa_dport_sm_starting);
		bfa_dport_send_req(dport, BFI_DPORT_START);
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* IOC h/w failure: cancel the queue wait and fail */
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6387
/*
 * D-port START (test restart) request is outstanding to firmware.
 */
static void
bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		/* Reset previous results before the new (manual) run */
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			dport->test_state = BFA_DPORT_ST_INP;
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
		}
		/* fall thru - on success also return to enabled state */

	case BFA_DPORT_SM_REQFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* IOC h/w failure while the request was in flight */
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6418
/*
 * Dynamic d-port disable (BFI_DPORT_DYN_DISABLE) is outstanding;
 * completion is signalled through an SCN rather than a request
 * response.
 */
static void
bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
			       enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_SCN:
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_DDPORT_DISABLED:
			/* Dynamic mode torn down; re-enable the fcport */
			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			bfa_fcport_enable(dport->bfa);
			break;

		default:
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);

		}
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* IOC failure implicitly disables; complete with OK */
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6450
/*
 * Waiting for request-queue space to send the dynamic d-port DISABLE
 * command.
 */
static void
bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
			    enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		/* CQ space available: send the deferred DYN_DISABLE */
		bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling);
		bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* IOC failure implicitly disables; complete with OK */
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	case BFA_DPORT_SM_SCN:
		/* ignore */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6477
6478static bfa_boolean_t
6479bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
6480{
6481        struct bfi_diag_dport_req_s *m;
6482
6483        /*
6484         * check for room in queue to send request now
6485         */
6486        m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
6487        if (!m) {
6488                bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait);
6489                return BFA_FALSE;
6490        }
6491
6492        bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
6493                    bfa_fn_lpu(dport->bfa));
6494        m->req  = req;
6495        if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) {
6496                m->lpcnt = cpu_to_be32(dport->lpcnt);
6497                m->payload = cpu_to_be32(dport->payload);
6498        }
6499
6500        /*
6501         * queue I/O message to firmware
6502         */
6503        bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
6504
6505        return BFA_TRUE;
6506}
6507
6508static void
6509bfa_dport_qresume(void *cbarg)
6510{
6511        struct bfa_dport_s *dport = cbarg;
6512
6513        bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
6514}
6515
6516static void
6517bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg)
6518{
6519        msg->status = cpu_to_be32(msg->status);
6520        dport->i2hmsg.rsp.status = msg->status;
6521        dport->rp_pwwn = msg->pwwn;
6522        dport->rp_nwwn = msg->nwwn;
6523
6524        if ((msg->status == BFA_STATUS_OK) ||
6525            (msg->status == BFA_STATUS_DPORT_NO_SFP)) {
6526                bfa_trc(dport->bfa, msg->status);
6527                bfa_trc(dport->bfa, dport->rp_pwwn);
6528                bfa_trc(dport->bfa, dport->rp_nwwn);
6529                bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
6530
6531        } else {
6532                bfa_trc(dport->bfa, msg->status);
6533                bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL);
6534        }
6535        bfa_cb_fcdiag_dport(dport, msg->status);
6536}
6537
6538static bfa_boolean_t
6539bfa_dport_is_sending_req(struct bfa_dport_s *dport)
6540{
6541        if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling)      ||
6542            bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6543            bfa_sm_cmp_state(dport, bfa_dport_sm_disabling)     ||
6544            bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) ||
6545            bfa_sm_cmp_state(dport, bfa_dport_sm_starting)      ||
6546            bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) {
6547                return BFA_TRUE;
6548        } else {
6549                return BFA_FALSE;
6550        }
6551}
6552
6553static void
6554bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
6555{
6556        int i;
6557        uint8_t subtesttype;
6558
6559        bfa_trc(dport->bfa, msg->state);
6560        dport->i2hmsg.scn.state = msg->state;
6561
6562        switch (dport->i2hmsg.scn.state) {
6563        case BFI_DPORT_SCN_TESTCOMP:
6564                dport->result.end_time = ktime_get_real_seconds();
6565                bfa_trc(dport->bfa, dport->result.end_time);
6566
6567                dport->result.status = msg->info.testcomp.status;
6568                bfa_trc(dport->bfa, dport->result.status);
6569
6570                dport->result.roundtrip_latency =
6571                        cpu_to_be32(msg->info.testcomp.latency);
6572                dport->result.est_cable_distance =
6573                        cpu_to_be32(msg->info.testcomp.distance);
6574                dport->result.buffer_required =
6575                        be16_to_cpu(msg->info.testcomp.numbuffer);
6576
6577                dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz);
6578                dport->result.speed = msg->info.testcomp.speed;
6579
6580                bfa_trc(dport->bfa, dport->result.roundtrip_latency);
6581                bfa_trc(dport->bfa, dport->result.est_cable_distance);
6582                bfa_trc(dport->bfa, dport->result.buffer_required);
6583                bfa_trc(dport->bfa, dport->result.frmsz);
6584                bfa_trc(dport->bfa, dport->result.speed);
6585
6586                for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) {
6587                        dport->result.subtest[i].status =
6588                                msg->info.testcomp.subtest_status[i];
6589                        bfa_trc(dport->bfa, dport->result.subtest[i].status);
6590                }
6591                break;
6592
6593        case BFI_DPORT_SCN_TESTSKIP:
6594        case BFI_DPORT_SCN_DDPORT_ENABLE:
6595                memset(&dport->result, 0,
6596                                sizeof(struct bfa_diag_dport_result_s));
6597                break;
6598
6599        case BFI_DPORT_SCN_TESTSTART:
6600                memset(&dport->result, 0,
6601                                sizeof(struct bfa_diag_dport_result_s));
6602                dport->rp_pwwn = msg->info.teststart.pwwn;
6603                dport->rp_nwwn = msg->info.teststart.nwwn;
6604                dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm);
6605                bfa_dport_result_start(dport, msg->info.teststart.mode);
6606                break;
6607
6608        case BFI_DPORT_SCN_SUBTESTSTART:
6609                subtesttype = msg->info.teststart.type;
6610                dport->result.subtest[subtesttype].start_time =
6611                        ktime_get_real_seconds();
6612                dport->result.subtest[subtesttype].status =
6613                        DPORT_TEST_ST_INPRG;
6614
6615                bfa_trc(dport->bfa, subtesttype);
6616                bfa_trc(dport->bfa,
6617                        dport->result.subtest[subtesttype].start_time);
6618                break;
6619
6620        case BFI_DPORT_SCN_SFP_REMOVED:
6621        case BFI_DPORT_SCN_DDPORT_DISABLED:
6622        case BFI_DPORT_SCN_DDPORT_DISABLE:
6623        case BFI_DPORT_SCN_FCPORT_DISABLE:
6624                dport->result.status = DPORT_TEST_ST_IDLE;
6625                break;
6626
6627        default:
6628                bfa_sm_fault(dport->bfa, msg->state);
6629        }
6630
6631        bfa_sm_send_event(dport, BFA_DPORT_SM_SCN);
6632}
6633
6634/*
6635 * Dport enable
6636 *
6637 * @param[in] *bfa            - bfa data struct
6638 */
6639bfa_status_t
6640bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
6641                                bfa_cb_diag_t cbfn, void *cbarg)
6642{
6643        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6644        struct bfa_dport_s  *dport = &fcdiag->dport;
6645
6646        /*
6647         * Dport is not support in MEZZ card
6648         */
6649        if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
6650                bfa_trc(dport->bfa, BFA_STATUS_PBC);
6651                return BFA_STATUS_CMD_NOTSUPP_MEZZ;
6652        }
6653
6654        /*
6655         * Dport is supported in CT2 or above
6656         */
6657        if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) {
6658                bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id);
6659                return BFA_STATUS_FEATURE_NOT_SUPPORTED;
6660        }
6661
6662        /*
6663         * Check to see if IOC is down
6664        */
6665        if (!bfa_iocfc_is_operational(bfa))
6666                return BFA_STATUS_IOC_NON_OP;
6667
6668        /* if port is PBC disabled, return error */
6669        if (bfa_fcport_is_pbcdisabled(bfa)) {
6670                bfa_trc(dport->bfa, BFA_STATUS_PBC);
6671                return BFA_STATUS_PBC;
6672        }
6673
6674        /*
6675         * Check if port mode is FC port
6676         */
6677        if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
6678                bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
6679                return BFA_STATUS_CMD_NOTSUPP_CNA;
6680        }
6681
6682        /*
6683         * Check if port is in LOOP mode
6684         */
6685        if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
6686            (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
6687                bfa_trc(dport->bfa, 0);
6688                return BFA_STATUS_TOPOLOGY_LOOP;
6689        }
6690
6691        /*
6692         * Check if port is TRUNK mode
6693         */
6694        if (bfa_fcport_is_trunk_enabled(bfa)) {
6695                bfa_trc(dport->bfa, 0);
6696                return BFA_STATUS_ERROR_TRUNK_ENABLED;
6697        }
6698
6699        /*
6700         * Check if diag loopback is running
6701         */
6702        if (bfa_fcdiag_lb_is_running(bfa)) {
6703                bfa_trc(dport->bfa, 0);
6704                return BFA_STATUS_DIAG_BUSY;
6705        }
6706
6707        /*
6708         * Check to see if port is disable or in dport state
6709         */
6710        if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6711            (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6712                bfa_trc(dport->bfa, 0);
6713                return BFA_STATUS_PORT_NOT_DISABLED;
6714        }
6715
6716        /*
6717         * Check if dport is in dynamic mode
6718         */
6719        if (dport->dynamic)
6720                return BFA_STATUS_DDPORT_ERR;
6721
6722        /*
6723         * Check if dport is busy
6724         */
6725        if (bfa_dport_is_sending_req(dport))
6726                return BFA_STATUS_DEVBUSY;
6727
6728        /*
6729         * Check if dport is already enabled
6730         */
6731        if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6732                bfa_trc(dport->bfa, 0);
6733                return BFA_STATUS_DPORT_ENABLED;
6734        }
6735
6736        bfa_trc(dport->bfa, lpcnt);
6737        bfa_trc(dport->bfa, pat);
6738        dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
6739        dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
6740        dport->cbfn = cbfn;
6741        dport->cbarg = cbarg;
6742
6743        bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
6744        return BFA_STATUS_OK;
6745}
6746
6747/*
6748 *      Dport disable
6749 *
6750 *      @param[in] *bfa            - bfa data struct
6751 */
6752bfa_status_t
6753bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6754{
6755        struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6756        struct bfa_dport_s *dport = &fcdiag->dport;
6757
6758        if (bfa_ioc_is_disabled(&bfa->ioc))
6759                return BFA_STATUS_IOC_DISABLED;
6760
6761        /* if port is PBC disabled, return error */
6762        if (bfa_fcport_is_pbcdisabled(bfa)) {
6763                bfa_trc(dport->bfa, BFA_STATUS_PBC);
6764                return BFA_STATUS_PBC;
6765        }
6766
6767        /*
6768         * Check if dport is in dynamic mode
6769         */
6770        if (dport->dynamic) {
6771                return BFA_STATUS_DDPORT_ERR;
6772        }
6773
6774        /*
6775         * Check to see if port is disable or in dport state
6776         */
6777        if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6778            (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6779                bfa_trc(dport->bfa, 0);
6780                return BFA_STATUS_PORT_NOT_DISABLED;
6781        }
6782
6783        /*
6784         * Check if dport is busy
6785         */
6786        if (bfa_dport_is_sending_req(dport))
6787                return BFA_STATUS_DEVBUSY;
6788
6789        /*
6790         * Check if dport is already disabled
6791         */
6792        if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
6793                bfa_trc(dport->bfa, 0);
6794                return BFA_STATUS_DPORT_DISABLED;
6795        }
6796
6797        dport->cbfn = cbfn;
6798        dport->cbarg = cbarg;
6799
6800        bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
6801        return BFA_STATUS_OK;
6802}
6803
6804/*
6805 * Dport start -- restart dport test
6806 *
6807 *   @param[in] *bfa            - bfa data struct
6808 */
bfa_status_t
bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
			bfa_cb_diag_t cbfn, void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	/*
	 * Check to see if IOC is down
	 */
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * Check if dport is in dynamic mode
	 * (dynamic d-port tests are driven by the switch and cannot be
	 * restarted through this API)
	 */
	if (dport->dynamic)
		return BFA_STATUS_DDPORT_ERR;

	/*
	 * Check if dport is busy
	 */
	if (bfa_dport_is_sending_req(dport))
		return BFA_STATUS_DEVBUSY;

	/*
	 * Check if dport is in enabled state.
	 * Test can only be restart when previous test has completed
	 */
	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_DPORT_DISABLED;

	} else {
		if (dport->test_state == BFA_DPORT_ST_NO_SFP)
			return BFA_STATUS_DPORT_INV_SFP;

		if (dport->test_state == BFA_DPORT_ST_INP)
			return BFA_STATUS_DEVBUSY;

		/* only a completed test may be restarted */
		WARN_ON(dport->test_state != BFA_DPORT_ST_COMP);
	}

	bfa_trc(dport->bfa, lpcnt);
	bfa_trc(dport->bfa, pat);

	/* substitute defaults when caller passes zero */
	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;

	dport->cbfn = cbfn;
	dport->cbarg = cbarg;

	bfa_sm_send_event(dport, BFA_DPORT_SM_START);
	return BFA_STATUS_OK;
}
6864
6865/*
6866 * Dport show -- return dport test result
6867 *
6868 *   @param[in] *bfa            - bfa data struct
6869 */
bfa_status_t
bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	/*
	 * Check to see if IOC is down
	 */
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * Check if dport is busy
	 */
	if (bfa_dport_is_sending_req(dport))
		return BFA_STATUS_DEVBUSY;

	/*
	 * Check if dport is in enabled state.
	 */
	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_DPORT_DISABLED;

	}

	/*
	 * Check if there is SFP
	 */
	if (dport->test_state == BFA_DPORT_ST_NO_SFP)
		return BFA_STATUS_DPORT_INV_SFP;

	/* copy the cached result (filled in by bfa_dport_scn()) */
	memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s));

	return BFA_STATUS_OK;
}
6907