linux/drivers/scsi/bfa/bfa_fcpim.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);

/*
 *  BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);

#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
        (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
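
/*
 * Note: the mask arithmetic above implements "tag % num_itnims" and is
 * only correct when num_itnims is a power of two. Illustrative sketch
 * (values are hypothetical, not from the driver):
 *
 *      num_itnims = 256;                       // 0x100
 *      tag        = 0x1205;
 *      index      = tag & (num_itnims - 1);    // 0x05
 */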

#define bfa_fcpim_additn(__itnim)                                       \
        list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
#define bfa_fcpim_delitn(__itnim)       do {                            \
        WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));   \
        bfa_itnim_update_del_itn_stats(__itnim);      \
        list_del(&(__itnim)->qe);      \
        WARN_ON(!list_empty(&(__itnim)->io_q));                         \
        WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));                 \
        WARN_ON(!list_empty(&(__itnim)->pending_q));                    \
} while (0)

#define bfa_itnim_online_cb(__itnim) do {                               \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_online((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_online, (__itnim));      \
        }                                                               \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do {                              \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_offline((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_offline, (__itnim));      \
        }                                                               \
} while (0)

#define bfa_itnim_sler_cb(__itnim) do {                                 \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_sler((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_sler, (__itnim));      \
        }                                                               \
} while (0)
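
/*
 * The three macros above pick one of two delivery paths: when an FCS
 * instance is attached (bfa->fcs) the bfa_cb_itnim_*() callback runs
 * synchronously; otherwise it is deferred through bfa_cb_queue() and
 * the matching __bfa_cb_itnim_*() wrapper defined later in this file.
 * Illustrative sketch of the deferred path (not part of the driver):
 *
 *      bfa_cb_queue(itnim->bfa, &itnim->hcb_qe,
 *                   __bfa_cb_itnim_online, itnim);
 *      // later, from the completion context:
 *      __bfa_cb_itnim_online(itnim, BFA_TRUE); // -> bfa_cb_itnim_online()
 */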

enum bfa_ioim_lm_ua_status {
        BFA_IOIM_LM_UA_RESET = 0,
        BFA_IOIM_LM_UA_SET = 1,
};

/*
 *  itnim state machine event
 */
enum bfa_itnim_event {
        BFA_ITNIM_SM_CREATE = 1,        /*  itnim is created */
        BFA_ITNIM_SM_ONLINE = 2,        /*  itnim is online */
        BFA_ITNIM_SM_OFFLINE = 3,       /*  itnim is offline */
        BFA_ITNIM_SM_FWRSP = 4,         /*  firmware response */
        BFA_ITNIM_SM_DELETE = 5,        /*  deleting an existing itnim */
        BFA_ITNIM_SM_CLEANUP = 6,       /*  IO cleanup completion */
        BFA_ITNIM_SM_SLER = 7,          /*  second level error recovery */
        BFA_ITNIM_SM_HWFAIL = 8,        /*  IOC h/w failure event */
        BFA_ITNIM_SM_QRESUME = 9,       /*  queue space available */
};
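
/*
 * Typical event flow, as driven by the state machine functions below
 * (a sketch of the common case, not an exhaustive list):
 *
 *      uninit --CREATE--> created --ONLINE--> fwcreate --FWRSP--> online
 *      online --OFFLINE--> cleanup_offline --CLEANUP--> fwdelete
 *      fwdelete --FWRSP--> offline --DELETE--> uninit
 */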

/*
 *  BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do {                            \
        list_del(&(__ioim)->qe);                                        \
        list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);    \
} while (0)


#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {                  \
        if ((__fcpim)->profile_comp)                                    \
                (__fcpim)->profile_comp(__ioim);                        \
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {                 \
        if ((__fcpim)->profile_start)                                   \
                (__fcpim)->profile_start(__ioim);                       \
} while (0)

/*
 * IO state machine events
 */
enum bfa_ioim_event {
        BFA_IOIM_SM_START       = 1,    /*  io start request from host */
        BFA_IOIM_SM_COMP_GOOD   = 2,    /*  io good comp, resource free */
        BFA_IOIM_SM_COMP        = 3,    /*  io comp, resource is free */
        BFA_IOIM_SM_COMP_UTAG   = 4,    /*  io comp, resource is free */
        BFA_IOIM_SM_DONE        = 5,    /*  io comp, resource not free */
        BFA_IOIM_SM_FREE        = 6,    /*  io resource is freed */
        BFA_IOIM_SM_ABORT       = 7,    /*  abort request from scsi stack */
        BFA_IOIM_SM_ABORT_COMP  = 8,    /*  abort from f/w */
        BFA_IOIM_SM_ABORT_DONE  = 9,    /*  abort completion from f/w */
        BFA_IOIM_SM_QRESUME     = 10,   /*  CQ space available to queue IO */
        BFA_IOIM_SM_SGALLOCED   = 11,   /*  SG page allocation successful */
        BFA_IOIM_SM_SQRETRY     = 12,   /*  sequence recovery retry */
        BFA_IOIM_SM_HCB         = 13,   /*  bfa callback complete */
        BFA_IOIM_SM_CLEANUP     = 14,   /*  IO cleanup from itnim */
        BFA_IOIM_SM_TMSTART     = 15,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_TMDONE      = 16,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_HWFAIL      = 17,   /*  IOC h/w failure event */
        BFA_IOIM_SM_IOTOV       = 18,   /*  ITN offline TOV */
};
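
/*
 * A sketch of the common IO paths through the IO state machine below
 * (illustrative, not exhaustive):
 *
 *      uninit --START--> active --COMP_GOOD/COMP--> hcb --HCB--> uninit
 *      active --ABORT--> abort --ABORT_DONE--> hcb
 *      uninit --START (reqq full)--> qfull --QRESUME--> active
 */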


/*
 *  BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {                           \
        bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
        bfa_tskim_notify_comp(__tskim);      \
} while (0)

#define bfa_tskim_notify_comp(__tskim) do {                             \
        if ((__tskim)->notify)                                          \
                bfa_itnim_tskdone((__tskim)->itnim);      \
} while (0)


enum bfa_tskim_event {
        BFA_TSKIM_SM_START      = 1,    /*  TM command start            */
        BFA_TSKIM_SM_DONE       = 2,    /*  TM completion               */
        BFA_TSKIM_SM_QRESUME    = 3,    /*  resume after qfull          */
        BFA_TSKIM_SM_HWFAIL     = 5,    /*  IOC h/w failure event       */
        BFA_TSKIM_SM_HCB        = 6,    /*  BFA callback completion     */
        BFA_TSKIM_SM_IOS_DONE   = 7,    /*  IO and sub TM completions   */
        BFA_TSKIM_SM_CLEANUP    = 8,    /*  TM cleanup on ITN offline   */
        BFA_TSKIM_SM_CLEANUP_DONE = 9,  /*  TM abort completion */
        BFA_TSKIM_SM_UTAG       = 10,   /*  TM completion unknown tag  */
};
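
/*
 * A sketch of the normal task-management flow (illustrative; the TSKIM
 * state machine functions are only forward-declared below):
 *
 *      uninit --START--> active --DONE--> iocleanup --IOS_DONE--> hcb
 */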

/*
 * forward declaration for BFA ITNIM functions
 */
static void     bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void     bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void     bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void     __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void     bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov(void *itnim_arg);
static void     bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/*
 * forward declaration of ITNIM state machine
 */
static void     bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);

/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t    bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void             bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);

/*
 * forward declaration of BFA IO state machine
 */
static void     bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
/*
 * forward declaration for BFA TSKIM functions
 */
static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
                                        struct scsi_lun lun);
static void     bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void     bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void     bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/*
 * forward declaration of BFA TSKIM state machine
 */
static void     bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
/*
 *  BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        bfa_itnim_meminfo(cfg, km_len);

        /*
         * IO memory
         */
        *km_len += cfg->fwcfg.num_ioim_reqs *
          (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

        /*
         * task management command memory
         */
        if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
                cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
        *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}
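
/*
 * Worked example of the accounting above, assuming a hypothetical
 * configuration (all sizes are illustrative, not real):
 *
 *      num_ioim_reqs  = 512, sizeof(ioim) + sizeof(ioim_sp) = 512 bytes
 *      num_tskim_reqs = 128, sizeof(tskim) = 256 bytes
 *      *km_len += 512 * 512;   // 256 KiB for IO state
 *      *km_len += 128 * 256;   //  32 KiB for TM state
 */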


static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
                struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_s *bfa = fcp->bfa;

        bfa_trc(bfa, cfg->drvcfg.path_tov);
        bfa_trc(bfa, cfg->fwcfg.num_rports);
        bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
        bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

        fcpim->fcp              = fcp;
        fcpim->bfa              = bfa;
        fcpim->num_itnims       = cfg->fwcfg.num_rports;
        fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
        fcpim->path_tov         = cfg->drvcfg.path_tov;
        fcpim->delay_comp       = cfg->drvcfg.delay_comp;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;

        bfa_itnim_attach(fcpim);
        bfa_tskim_attach(fcpim);
        bfa_ioim_attach(fcpim);
}

void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_itnim_s *itnim;
        struct list_head *qe, *qen;

        /* Enqueue unused tskim resources to free_q */
        list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);

        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_iocdisable(itnim);
        }
}

void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        fcpim->path_tov = path_tov * 1000;
        if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
                fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}
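
/*
 * path_tov is supplied in seconds and stored in milliseconds, capped at
 * BFA_FCPIM_PATHTOV_MAX. E.g. a hypothetical call:
 *
 *      bfa_fcpim_path_tov_set(bfa, 30);        // fcpim->path_tov = 30000 ms
 */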

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->path_tov / 1000;
}

#define bfa_fcpim_add_iostats(__l, __r, __stats)        \
        (__l->__stats += __r->__stats)
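
/*
 * bfa_fcpim_add_iostats() splices the field name passed as __stats into
 * both operands textually, so each line in the function below expands
 * roughly to:
 *
 *      lstats->total_ios += rstats->total_ios;
 */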

void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
                struct bfa_itnim_iostats_s *rstats)
{
        bfa_fcpim_add_iostats(lstats, rstats, total_ios);
        bfa_fcpim_add_iostats(lstats, rstats, qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
        bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
        bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
        bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
        bfa_fcpim_add_iostats(lstats, rstats, onlines);
        bfa_fcpim_add_iostats(lstats, rstats, offlines);
        bfa_fcpim_add_iostats(lstats, rstats, creates);
        bfa_fcpim_add_iostats(lstats, rstats, deletes);
        bfa_fcpim_add_iostats(lstats, rstats, create_comps);
        bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
        bfa_fcpim_add_iostats(lstats, rstats, sler_events);
        bfa_fcpim_add_iostats(lstats, rstats, fw_create);
        bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
        bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
        bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
        bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_success);
        bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
        bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
        bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}

bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa,
                struct bfa_itnim_iostats_s *stats, u8 lp_tag)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;
        struct bfa_itnim_s *itnim;

        /* accumulate IO stats from itnim */
        memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                if (itnim->rport->rport_info.lp_tag != lp_tag)
                        continue;
                bfa_fcpim_add_stats(stats, &(itnim->stats));
        }
        return BFA_STATUS_OK;
}

static void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
        struct bfa_itnim_latency_s *io_lat =
                        &(ioim->itnim->ioprofile.io_latency);
        u32 val, idx;

        val = (u32)(jiffies - ioim->start_time);
        idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
        bfa_itnim_ioprofile_update(ioim->itnim, idx);

        io_lat->count[idx]++;
        io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
        io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
        io_lat->avg[idx] += val;
}
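
/*
 * io_latency keeps per-bucket min/max in jiffies and accumulates a
 * running sum in avg[]; presumably the consumer divides avg[idx] by
 * count[idx] when reporting. A sketch of that final step (assumption,
 * not code from this driver):
 *
 *      mean = io_lat->count[idx] ?
 *              io_lat->avg[idx] / io_lat->count[idx] : 0;
 */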

static void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
        ioim->start_time = jiffies;
}

bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time)
{
        struct bfa_itnim_s *itnim;
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;

        /* accumulate IO stats from itnim */
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_clear_stats(itnim);
        }
        fcpim->io_profile = BFA_TRUE;
        fcpim->io_profile_start_time = time;
        fcpim->profile_comp = bfa_ioim_profile_comp;
        fcpim->profile_start = bfa_ioim_profile_start;
        return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_profile_off(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        fcpim->io_profile = BFA_FALSE;
        fcpim->io_profile_start_time = 0;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;
        return BFA_STATUS_OK;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->q_depth;
}

/*
 *  BFA ITNIM module state machine functions
 */

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CREATE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_created);
                itnim->is_online = BFA_FALSE;
                bfa_fcpim_additn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_online);
                itnim->is_online = BFA_TRUE;
                bfa_itnim_iotov_online(itnim);
                bfa_itnim_online_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                bfa_itnim_send_fwcreate(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_sler_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Second level error recovery needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_cleanup(itnim);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                 enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_tskim_s *tskim;
        struct bfa_ioim_s *ioim;
        struct list_head        *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_tskim_iocdisable(tskim);
        }

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }

        /*
         * For IO requests in the pending queue, pretend an early timeout.
         */
        list_for_each_safe(qe, qen, &itnim->pending_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_tov(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }
}

/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
        struct bfa_itnim_s *itnim = itnim_cbarg;

        bfa_stats(itnim, cleanup_comps);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s  *ioim;
        struct bfa_tskim_s *tskim;
        struct list_head        *qe, *qen;

        bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;

                /*
                 * Move IO to a cleanup queue from active queue so that a later
                 * TM will not pick up this IO.
                 */
                list_del(&ioim->qe);
                list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

                bfa_wc_up(&itnim->wc);
                bfa_ioim_cleanup(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_wc_up(&itnim->wc);
                bfa_tskim_cleanup(tskim);
        }

        bfa_wc_wait(&itnim->wc);
}
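
/*
 * The cleanup above uses the bfa worker-counter (bfa_wc) pattern:
 * bfa_wc_init() registers bfa_itnim_cleanp_comp() and takes an initial
 * reference, each outstanding IO/TM takes one more via bfa_wc_up(), and
 * bfa_wc_wait() drops the initial reference. The completion callback
 * therefore fires only after every bfa_itnim_iodone()/bfa_itnim_tskdone()
 * has dropped its reference. Illustrative sketch (assumed bfa_wc
 * semantics from the bfa_cs.h helpers):
 *
 *      bfa_wc_init(&wc, done_cb, arg); // count = 1
 *      bfa_wc_up(&wc);                 // count = 2, one per pending item
 *      bfa_wc_wait(&wc);               // count = 1
 *      bfa_wc_down(&wc);               // count = 0 -> done_cb(arg)
 */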

static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_sler(itnim->ditn);
}

/*
 * Called to resume any I/O requests waiting for room in the request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
        struct bfa_itnim_s *itnim = cbarg;

        bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}

/*
 *  bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        /*
         * ITN memory
         */
        *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}

void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
        struct bfa_s    *bfa = fcpim->bfa;
        struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
        struct bfa_itnim_s *itnim;
        int     i, j;

        INIT_LIST_HEAD(&fcpim->itnim_q);

        itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
        fcpim->itnim_arr = itnim;

        for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
                memset(itnim, 0, sizeof(struct bfa_itnim_s));
                itnim->bfa = bfa;
                itnim->fcpim = fcpim;
                itnim->reqq = BFA_REQQ_QOS_LO;
                itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
                itnim->iotov_active = BFA_FALSE;
                bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

                INIT_LIST_HEAD(&itnim->io_q);
                INIT_LIST_HEAD(&itnim->io_cleanup_q);
                INIT_LIST_HEAD(&itnim->pending_q);
                INIT_LIST_HEAD(&itnim->tsk_q);
                INIT_LIST_HEAD(&itnim->delay_comp_q);
                for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                        itnim->ioprofile.io_latency.min[j] = ~0;
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
        }

        bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, ioc_disabled);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}

static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_create_req_s *m;

        itnim->msg_no++;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        m->class = FC_CLASS_3;
        m->seq_rec = itnim->seq_rec;
        m->msg_no = itnim->msg_no;
        bfa_stats(itnim, fw_create);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}
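
/*
 * The request-queue pattern above recurs throughout this file: when
 * bfa_reqq_next() returns NULL the queue is full, so the caller parks a
 * wait element with bfa_reqq_wait() and reports BFA_FALSE; the state
 * machine then sits in a *_qfull state until bfa_itnim_qresume() raises
 * BFA_ITNIM_SM_QRESUME and the send is retried. Illustrative sketch of
 * a caller (taken from the state machines above):
 *
 *      if (!bfa_itnim_send_fwcreate(itnim))
 *              bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
 */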

static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_delete_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        bfa_stats(itnim, fw_delete);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}

/*
 * Clean up all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
        struct bfa_ioim_s *ioim;
        struct list_head *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
                ioim = (struct bfa_ioim_s *)qe;
                bfa_ioim_delayed_comp(ioim, iotov);
        }
}

/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        bfa_itnim_iotov_stop(itnim);

        /*
         * Abort all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_FALSE);

        /*
         * Start all pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &itnim->io_q);
                bfa_ioim_start(ioim);
        }
}

/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        /*
         * Fail all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_TRUE);

        /*
         * Fail any pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
                bfa_ioim_tov(ioim);
        }
}

/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
        struct bfa_itnim_s *itnim = itnim_arg;

        itnim->iotov_active = BFA_FALSE;

        bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        bfa_cb_itnim_tov(itnim->ditn);
}

/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
        if (itnim->fcpim->path_tov > 0) {

                itnim->iotov_active = BFA_TRUE;
                WARN_ON(!bfa_itnim_hold_io(itnim));
                bfa_timer_start(itnim->bfa, &itnim->timer,
                        bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
        }
}

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
        if (itnim->iotov_active) {
                itnim->iotov_active = BFA_FALSE;
                bfa_timer_stop(&itnim->timer);
        }
}

/*
/*
 * Stop the IO TOV timer and fail back any pending IO requests.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
        bfa_boolean_t pathtov_active = BFA_FALSE;

        if (itnim->iotov_active)
                pathtov_active = BFA_TRUE;

        bfa_itnim_iotov_stop(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov(itnim->ditn);
}

static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
        fcpim->del_itn_stats.del_itn_iocomp_aborted +=
                itnim->stats.iocomp_aborted;
        fcpim->del_itn_stats.del_itn_iocomp_timedout +=
                itnim->stats.iocomp_timedout;
        fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
                itnim->stats.iocom_sqer_needed;
        fcpim->del_itn_stats.del_itn_iocom_res_free +=
                itnim->stats.iocom_res_free;
        fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
                itnim->stats.iocom_hostabrts;
        fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
        fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
        fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}

/*
 * bfa_itnim_public
 */

/*
 * Itnim interrupt processing.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        union bfi_itn_i2h_msg_u msg;
        struct bfa_itnim_s *itnim;

        bfa_trc(bfa, m->mhdr.msg_id);

        msg.msg = m;

        switch (m->mhdr.msg_id) {
        case BFI_ITN_I2H_CREATE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.create_rsp->bfa_handle);
                WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, create_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_DELETE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.delete_rsp->bfa_handle);
                WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, delete_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_SLER_EVENT:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.sler_event->bfa_handle);
                bfa_stats(itnim, sler_events);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
                break;

        default:
                bfa_trc(bfa, m->mhdr.msg_id);
                WARN_ON(1);
        }
}

/*
 * bfa_itnim_api
 */

struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_itnim_s *itnim;

        bfa_itn_create(bfa, rport, bfa_itnim_isr);

        itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
        WARN_ON(itnim->rport != rport);

        itnim->ditn = ditn;

        bfa_stats(itnim, creates);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

        return itnim;
}

void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, deletes);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}

void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
        itnim->seq_rec = seq_rec;
        bfa_stats(itnim, onlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}

void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, offlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}

/*
 * Return true if itnim is considered offline for holding off IO requests.
 * IO is not held if itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
        return itnim->fcpim->path_tov && itnim->iotov_active &&
                (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}

#define bfa_io_lat_clock_res_div        HZ
#define bfa_io_lat_clock_res_mul        1000
bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
                        struct bfa_itnim_ioprofile_s *ioprofile)
{
        struct bfa_fcpim_s *fcpim;

        if (!itnim)
                return BFA_STATUS_NO_FCPIM_NEXUS;

        fcpim = BFA_FCPIM(itnim->bfa);

        if (!fcpim->io_profile)
                return BFA_STATUS_IOPROFILE_OFF;

        itnim->ioprofile.index = BFA_IOBUCKET_MAX;
        /* unsigned 32-bit time_t overflow here in y2106 */
        itnim->ioprofile.io_profile_start_time =
                                bfa_io_profile_start_time(itnim->bfa);
        itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
        itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
        *ioprofile = itnim->ioprofile;

        return BFA_STATUS_OK;
}
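
/*
 * clock_res_mul/clock_res_div let the reader of the profile convert the
 * jiffies-based latency samples to milliseconds; presumably (an
 * assumption about the consumer, not code in this driver):
 *
 *      latency_ms = sample * clock_res_mul / clock_res_div;   // *1000/HZ
 */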

void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
        int j;

        if (!itnim)
                return;

        memset(&itnim->stats, 0, sizeof(itnim->stats));
        memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
        for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                itnim->ioprofile.io_latency.min[j] = ~0;
}

/*
 *  BFA IO module state machine functions
 */

/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        switch (event) {
        case BFA_IOIM_SM_START:
                if (!bfa_itnim_is_online(ioim->itnim)) {
                        if (!bfa_itnim_hold_io(ioim->itnim)) {
                                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                                list_del(&ioim->qe);
                                list_add_tail(&ioim->qe,
                                        &ioim->fcpim->ioim_comp_q);
                                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                                __bfa_cb_ioim_pathtov, ioim);
                        } else {
                                list_del(&ioim->qe);
                                list_add_tail(&ioim->qe,
                                        &ioim->itnim->pending_q);
                        }
                        break;
                }

                if (ioim->nsges > BFI_SGE_INLINE) {
                        if (!bfa_ioim_sgpg_alloc(ioim)) {
                                bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
                                return;
                        }
                }

                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }

                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_IOTOV:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_pathtov, ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                /*
                 * IO in pending queue can get abort requests. Complete abort
                 * requests immediately.
                 */
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                        __bfa_cb_ioim_abort, ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}

/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        bfa_trc(ioim->bfa, ioim->iotag);
        bfa_trc(ioim->bfa, event);

        switch (event) {
        case BFA_IOIM_SM_SGALLOCED:
                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }
                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_CLEANUP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                bfa_ioim_notify_cleanup(ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
                              ioim);
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}

/*
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        switch (event) {
        case BFA_IOIM_SM_COMP_GOOD:
1618                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1619                bfa_ioim_move_to_comp_q(ioim);
1620                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1621                              __bfa_cb_ioim_good_comp, ioim);
1622                break;
1623
1624        case BFA_IOIM_SM_COMP:
1625                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1626                bfa_ioim_move_to_comp_q(ioim);
1627                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1628                              ioim);
1629                break;
1630
1631        case BFA_IOIM_SM_DONE:
1632                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1633                bfa_ioim_move_to_comp_q(ioim);
1634                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1635                              ioim);
1636                break;
1637
1638        case BFA_IOIM_SM_ABORT:
1639                ioim->iosp->abort_explicit = BFA_TRUE;
1640                ioim->io_cbfn = __bfa_cb_ioim_abort;
1641
1642                if (bfa_ioim_send_abort(ioim))
1643                        bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1644                else {
1645                        bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
1646                        bfa_stats(ioim->itnim, qwait);
1647                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
1648                                          &ioim->iosp->reqq_wait);
1649                }
1650                break;
1651
1652        case BFA_IOIM_SM_CLEANUP:
1653                ioim->iosp->abort_explicit = BFA_FALSE;
1654                ioim->io_cbfn = __bfa_cb_ioim_failed;
1655
1656                if (bfa_ioim_send_abort(ioim))
1657                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1658                else {
1659                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1660                        bfa_stats(ioim->itnim, qwait);
1661                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
1662                                          &ioim->iosp->reqq_wait);
1663                }
1664                break;
1665
1666        case BFA_IOIM_SM_HWFAIL:
1667                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1668                bfa_ioim_move_to_comp_q(ioim);
1669                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1670                              ioim);
1671                break;
1672
1673        case BFA_IOIM_SM_SQRETRY:
1674                if (bfa_ioim_maxretry_reached(ioim)) {
1675                        /* max retries reached, free the IO */
1676                        bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1677                        bfa_ioim_move_to_comp_q(ioim);
1678                        bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1679                                        __bfa_cb_ioim_failed, ioim);
1680                        break;
1681                }
1682                /* waiting for the IO tag resource to be freed */
1683                bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
1684                break;
1685
1686        default:
1687                bfa_sm_fault(ioim->bfa, event);
1688        }
1689}
1690
1691/*
1692 * IO is being retried with a new tag.
1693 */
1694static void
1695bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1696{
1697        switch (event) {
1698        case BFA_IOIM_SM_FREE:
1699                /* ABTS and RRQ are done. Now retry the IO with a new tag */
1700                bfa_ioim_update_iotag(ioim);
1701                if (!bfa_ioim_send_ioreq(ioim)) {
1702                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1703                        break;
1704                }
1705                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1706                break;
1707
1708        case BFA_IOIM_SM_CLEANUP:
1709                ioim->iosp->abort_explicit = BFA_FALSE;
1710                ioim->io_cbfn = __bfa_cb_ioim_failed;
1711
1712                if (bfa_ioim_send_abort(ioim))
1713                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1714                else {
1715                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1716                        bfa_stats(ioim->itnim, qwait);
1717                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
1718                                          &ioim->iosp->reqq_wait);
1719                }
1720                break;
1721
1722        case BFA_IOIM_SM_HWFAIL:
1723                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1724                bfa_ioim_move_to_comp_q(ioim);
1725                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1726                         __bfa_cb_ioim_failed, ioim);
1727                break;
1728
1729        case BFA_IOIM_SM_ABORT:
1730                /* In this state the IO abort is already done.
1731                 * Waiting for the IO tag resource to be freed.
1732                 */
1733                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1734                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1735                              ioim);
1736                break;
1737
1738        default:
1739                bfa_sm_fault(ioim->bfa, event);
1740        }
1741}
1742
1743/*
1744 * IO is being aborted, waiting for completion from firmware.
1745 */
1746static void
1747bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1748{
1749        bfa_trc(ioim->bfa, ioim->iotag);
1750        bfa_trc(ioim->bfa, event);
1751
1752        switch (event) {
1753        case BFA_IOIM_SM_COMP_GOOD:
1754        case BFA_IOIM_SM_COMP:
1755        case BFA_IOIM_SM_DONE:
1756        case BFA_IOIM_SM_FREE:
1757                break;
1758
1759        case BFA_IOIM_SM_ABORT_DONE:
1760                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1761                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1762                              ioim);
1763                break;
1764
1765        case BFA_IOIM_SM_ABORT_COMP:
1766                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1767                bfa_ioim_move_to_comp_q(ioim);
1768                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1769                              ioim);
1770                break;
1771
1772        case BFA_IOIM_SM_COMP_UTAG:
1773                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1774                bfa_ioim_move_to_comp_q(ioim);
1775                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1776                              ioim);
1777                break;
1778
1779        case BFA_IOIM_SM_CLEANUP:
1780                WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1781                ioim->iosp->abort_explicit = BFA_FALSE;
1782
1783                if (bfa_ioim_send_abort(ioim))
1784                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1785                else {
1786                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1787                        bfa_stats(ioim->itnim, qwait);
1788                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
1789                                          &ioim->iosp->reqq_wait);
1790                }
1791                break;
1792
1793        case BFA_IOIM_SM_HWFAIL:
1794                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1795                bfa_ioim_move_to_comp_q(ioim);
1796                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1797                              ioim);
1798                break;
1799
1800        default:
1801                bfa_sm_fault(ioim->bfa, event);
1802        }
1803}
1804
1805/*
1806 * IO is being cleaned up (implicit abort), waiting for completion from
1807 * firmware.
1808 */
1809static void
1810bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1811{
1812        bfa_trc(ioim->bfa, ioim->iotag);
1813        bfa_trc(ioim->bfa, event);
1814
1815        switch (event) {
1816        case BFA_IOIM_SM_COMP_GOOD:
1817        case BFA_IOIM_SM_COMP:
1818        case BFA_IOIM_SM_DONE:
1819        case BFA_IOIM_SM_FREE:
1820                break;
1821
1822        case BFA_IOIM_SM_ABORT:
1823                /*
1824                 * IO is already being aborted implicitly
1825                 */
1826                ioim->io_cbfn = __bfa_cb_ioim_abort;
1827                break;
1828
1829        case BFA_IOIM_SM_ABORT_DONE:
1830                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1831                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1832                bfa_ioim_notify_cleanup(ioim);
1833                break;
1834
1835        case BFA_IOIM_SM_ABORT_COMP:
1836                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1837                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1838                bfa_ioim_notify_cleanup(ioim);
1839                break;
1840
1841        case BFA_IOIM_SM_COMP_UTAG:
1842                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1843                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1844                bfa_ioim_notify_cleanup(ioim);
1845                break;
1846
1847        case BFA_IOIM_SM_HWFAIL:
1848                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1849                bfa_ioim_move_to_comp_q(ioim);
1850                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1851                              ioim);
1852                break;
1853
1854        case BFA_IOIM_SM_CLEANUP:
1855                /*
1856                 * IO can already be in cleanup state due to a TM command;
1857                 * the 2nd cleanup request comes from the ITN offline event.
1858                 */
1859                break;
1860
1861        default:
1862                bfa_sm_fault(ioim->bfa, event);
1863        }
1864}
1865
1866/*
1867 * IO is waiting for room in request CQ
1868 */
1869static void
1870bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1871{
1872        bfa_trc(ioim->bfa, ioim->iotag);
1873        bfa_trc(ioim->bfa, event);
1874
1875        switch (event) {
1876        case BFA_IOIM_SM_QRESUME:
1877                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1878                bfa_ioim_send_ioreq(ioim);
1879                break;
1880
1881        case BFA_IOIM_SM_ABORT:
1882                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1883                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1884                bfa_ioim_move_to_comp_q(ioim);
1885                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1886                              ioim);
1887                break;
1888
1889        case BFA_IOIM_SM_CLEANUP:
1890                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1891                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1892                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1893                              ioim);
1894                bfa_ioim_notify_cleanup(ioim);
1895                break;
1896
1897        case BFA_IOIM_SM_HWFAIL:
1898                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1899                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1900                bfa_ioim_move_to_comp_q(ioim);
1901                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1902                              ioim);
1903                break;
1904
1905        default:
1906                bfa_sm_fault(ioim->bfa, event);
1907        }
1908}
1909
1910/*
1911 * Active IO is being aborted, waiting for room in request CQ.
1912 */
1913static void
1914bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1915{
1916        bfa_trc(ioim->bfa, ioim->iotag);
1917        bfa_trc(ioim->bfa, event);
1918
1919        switch (event) {
1920        case BFA_IOIM_SM_QRESUME:
1921                bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1922                bfa_ioim_send_abort(ioim);
1923                break;
1924
1925        case BFA_IOIM_SM_CLEANUP:
1926                WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1927                ioim->iosp->abort_explicit = BFA_FALSE;
1928                bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1929                break;
1930
1931        case BFA_IOIM_SM_COMP_GOOD:
1932        case BFA_IOIM_SM_COMP:
1933                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1934                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1935                bfa_ioim_move_to_comp_q(ioim);
1936                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1937                              ioim);
1938                break;
1939
1940        case BFA_IOIM_SM_DONE:
1941                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1942                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1943                bfa_ioim_move_to_comp_q(ioim);
1944                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1945                              ioim);
1946                break;
1947
1948        case BFA_IOIM_SM_HWFAIL:
1949                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1950                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1951                bfa_ioim_move_to_comp_q(ioim);
1952                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1953                              ioim);
1954                break;
1955
1956        default:
1957                bfa_sm_fault(ioim->bfa, event);
1958        }
1959}
1960
1961/*
1962 * Active IO is being cleaned up, waiting for room in request CQ.
1963 */
1964static void
1965bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1966{
1967        bfa_trc(ioim->bfa, ioim->iotag);
1968        bfa_trc(ioim->bfa, event);
1969
1970        switch (event) {
1971        case BFA_IOIM_SM_QRESUME:
1972                bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1973                bfa_ioim_send_abort(ioim);
1974                break;
1975
1976        case BFA_IOIM_SM_ABORT:
1977                /*
1978                 * IO is already being cleaned up implicitly
1979                 */
1980                ioim->io_cbfn = __bfa_cb_ioim_abort;
1981                break;
1982
1983        case BFA_IOIM_SM_COMP_GOOD:
1984        case BFA_IOIM_SM_COMP:
1985                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1986                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1987                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1988                bfa_ioim_notify_cleanup(ioim);
1989                break;
1990
1991        case BFA_IOIM_SM_DONE:
1992                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1993                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1994                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1995                bfa_ioim_notify_cleanup(ioim);
1996                break;
1997
1998        case BFA_IOIM_SM_HWFAIL:
1999                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2000                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2001                bfa_ioim_move_to_comp_q(ioim);
2002                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
2003                              ioim);
2004                break;
2005
2006        default:
2007                bfa_sm_fault(ioim->bfa, event);
2008        }
2009}
2010
2011/*
2012 * IO bfa callback is pending.
2013 */
2014static void
2015bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2016{
2017        switch (event) {
2018        case BFA_IOIM_SM_HCB:
2019                bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2020                bfa_ioim_free(ioim);
2021                break;
2022
2023        case BFA_IOIM_SM_CLEANUP:
2024                bfa_ioim_notify_cleanup(ioim);
2025                break;
2026
2027        case BFA_IOIM_SM_HWFAIL:
2028                break;
2029
2030        default:
2031                bfa_sm_fault(ioim->bfa, event);
2032        }
2033}
2034
2035/*
2036 * IO bfa callback is pending. IO resource cannot be freed.
2037 */
2038static void
2039bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2040{
2041        bfa_trc(ioim->bfa, ioim->iotag);
2042        bfa_trc(ioim->bfa, event);
2043
2044        switch (event) {
2045        case BFA_IOIM_SM_HCB:
2046                bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
2047                list_del(&ioim->qe);
2048                list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
2049                break;
2050
2051        case BFA_IOIM_SM_FREE:
2052                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2053                break;
2054
2055        case BFA_IOIM_SM_CLEANUP:
2056                bfa_ioim_notify_cleanup(ioim);
2057                break;
2058
2059        case BFA_IOIM_SM_HWFAIL:
2060                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2061                break;
2062
2063        default:
2064                bfa_sm_fault(ioim->bfa, event);
2065        }
2066}
2067
2068/*
2069 * IO is completed, waiting for the resource free event from firmware.
2070 */
2071static void
2072bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2073{
2074        bfa_trc(ioim->bfa, ioim->iotag);
2075        bfa_trc(ioim->bfa, event);
2076
2077        switch (event) {
2078        case BFA_IOIM_SM_FREE:
2079                bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2080                bfa_ioim_free(ioim);
2081                break;
2082
2083        case BFA_IOIM_SM_CLEANUP:
2084                bfa_ioim_notify_cleanup(ioim);
2085                break;
2086
2087        case BFA_IOIM_SM_HWFAIL:
2088                break;
2089
2090        default:
2091                bfa_sm_fault(ioim->bfa, event);
2092        }
2093}
2094
2095/*
2096 * This is called from bfa_fcpim_start after bfa_init(), once the driver
2097 * has completed the flash read. Now invalidate the stale contents of the
2098 * lun mask, such as unit attention, rp tag and lp tag.
2099 */
2100void
2101bfa_ioim_lm_init(struct bfa_s *bfa)
2102{
2103        struct bfa_lun_mask_s *lunm_list;
2104        int     i;
2105
2106        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2107                return;
2108
2109        lunm_list = bfa_get_lun_mask_list(bfa);
2110        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2111                lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
2112                lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2113                lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2114        }
2115}
2116
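/*
 * BFA callback for good IO completions; completes the IO to the driver.
 */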
2117static void
2118__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2119{
2120        struct bfa_ioim_s *ioim = cbarg;
2121
2122        if (!complete) {
2123                bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2124                return;
2125        }
2126
2127        bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2128}
2129
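/*
 * BFA callback for IO completions; extracts sense data and residue
 * from the firmware response before completing the IO to the driver.
 */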
2130static void
2131__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2132{
2133        struct bfa_ioim_s       *ioim = cbarg;
2134        struct bfi_ioim_rsp_s *m;
2135        u8      *snsinfo = NULL;
2136        u8      sns_len = 0;
2137        s32     residue = 0;
2138
2139        if (!complete) {
2140                bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2141                return;
2142        }
2143
2144        m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2145        if (m->io_status == BFI_IOIM_STS_OK) {
2146                /*
2147                 * setup sense information, if present
2148                 */
2149                if ((m->scsi_status == SAM_STAT_CHECK_CONDITION) &&
2150                                        m->sns_len) {
2151                        sns_len = m->sns_len;
2152                        snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2153                                                ioim->iotag);
2154                }
2155
2156                /*
2157                 * setup residue value correctly for normal completions
2158                 */
2159                if (m->resid_flags == FCP_RESID_UNDER) {
2160                        residue = be32_to_cpu(m->residue);
2161                        bfa_stats(ioim->itnim, iocomp_underrun);
2162                }
2163                if (m->resid_flags == FCP_RESID_OVER) {
2164                        residue = be32_to_cpu(m->residue);
2165                        residue = -residue;
2166                        bfa_stats(ioim->itnim, iocomp_overrun);
2167                }
2168        }
2169
2170        bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
2171                          m->scsi_status, sns_len, snsinfo, residue);
2172}
2173
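/*
 * Update the rport and lport tags of active LUN mask entries matching
 * the given local and remote port WWNs.
 */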
2174void
2175bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
2176                        u16 rp_tag, u8 lp_tag)
2177{
2178        struct bfa_lun_mask_s *lun_list;
2179        u8      i;
2180
2181        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2182                return;
2183
2184        lun_list = bfa_get_lun_mask_list(bfa);
2185        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2186                if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2187                        if ((lun_list[i].lp_wwn == lp_wwn) &&
2188                            (lun_list[i].rp_wwn == rp_wwn)) {
2189                                lun_list[i].rp_tag = rp_tag;
2190                                lun_list[i].lp_tag = lp_tag;
2191                        }
2192                }
2193        }
2194}
2195
2196/*
2197 * set UA for all active luns in LM DB
2198 */
2199static void
2200bfa_ioim_lm_set_ua(struct bfa_s *bfa)
2201{
2202        struct bfa_lun_mask_s   *lunm_list;
2203        int     i;
2204
2205        lunm_list = bfa_get_lun_mask_list(bfa);
2206        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2207                if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2208                        continue;
2209                lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2210        }
2211}
2212
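/*
 * Enable or disable LUN masking; on enable, set unit attention for all
 * active entries. The new setting is persisted via dconf.
 */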
2213bfa_status_t
2214bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
2215{
2216        struct bfa_lunmask_cfg_s        *lun_mask;
2217
2218        bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2219        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2220                return BFA_STATUS_FAILED;
2221
2222        if (bfa_get_lun_mask_status(bfa) == update)
2223                return BFA_STATUS_NO_CHANGE;
2224
2225        lun_mask = bfa_get_lun_mask(bfa);
2226        lun_mask->status = update;
2227
2228        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
2229                bfa_ioim_lm_set_ua(bfa);
2230
2231        return  bfa_dconf_update(bfa);
2232}
2233
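/*
 * Clear all LUN mask entries and persist the change via dconf.
 */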
2234bfa_status_t
2235bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
2236{
2237        int i;
2238        struct bfa_lun_mask_s   *lunm_list;
2239
2240        bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2241        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2242                return BFA_STATUS_FAILED;
2243
2244        lunm_list = bfa_get_lun_mask_list(bfa);
2245        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2246                if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2247                        if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
2248                                bfa_rport_unset_lunmask(bfa,
2249                                  BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
2250                }
2251        }
2252
2253        memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
2254        return bfa_dconf_update(bfa);
2255}
2256
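/*
 * Copy the current LUN mask configuration to the caller's buffer.
 */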
2257bfa_status_t
2258bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
2259{
2260        struct bfa_lunmask_cfg_s *lun_mask;
2261
2262        bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2263        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2264                return BFA_STATUS_FAILED;
2265
2266        lun_mask = bfa_get_lun_mask(bfa);
2267        memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
2268        return BFA_STATUS_OK;
2269}
2270
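/*
 * Add a LUN mask entry for the given local port, remote port and LUN;
 * the updated configuration is persisted via dconf.
 */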
2271bfa_status_t
2272bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2273                      wwn_t rpwwn, struct scsi_lun lun)
2274{
2275        struct bfa_lun_mask_s *lunm_list;
2276        struct bfa_rport_s *rp = NULL;
2277        int i, free_index = MAX_LUN_MASK_CFG + 1;
2278        struct bfa_fcs_lport_s *port = NULL;
2279        struct bfa_fcs_rport_s *rp_fcs;
2280
2281        bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2282        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2283                return BFA_STATUS_FAILED;
2284
2285        port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
2286                                   vf_id, *pwwn);
2287        if (port) {
2288                *pwwn = port->port_cfg.pwwn;
2289                rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2290                if (rp_fcs)
2291                        rp = rp_fcs->bfa_rport;
2292        }
2293
2294        lunm_list = bfa_get_lun_mask_list(bfa);
2295        /* check whether the entry already exists */
2296        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2297                if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2298                        free_index = i;
2299                if ((lunm_list[i].lp_wwn == *pwwn) &&
2300                    (lunm_list[i].rp_wwn == rpwwn) &&
2301                    (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2302                     scsilun_to_int((struct scsi_lun *)&lun)))
2303                        return  BFA_STATUS_ENTRY_EXISTS;
2304        }
2305
2306        if (free_index > MAX_LUN_MASK_CFG)
2307                return BFA_STATUS_MAX_ENTRY_REACHED;
2308
2309        if (rp) {
2310                lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
2311                                                   rp->rport_info.local_pid);
2312                lunm_list[free_index].rp_tag = rp->rport_tag;
2313        } else {
2314                lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
2315                lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
2316        }
2317
2318        lunm_list[free_index].lp_wwn = *pwwn;
2319        lunm_list[free_index].rp_wwn = rpwwn;
2320        lunm_list[free_index].lun = lun;
2321        lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
2322
2323        /* set unit attention for all luns in this rp */
2324        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2325                if ((lunm_list[i].lp_wwn == *pwwn) &&
2326                    (lunm_list[i].rp_wwn == rpwwn))
2327                        lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2328        }
2329
2330        return bfa_dconf_update(bfa);
2331}
2332
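/*
 * Delete the LUN mask entry matching the given local port, remote port
 * and LUN; the updated configuration is persisted via dconf.
 */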
2333bfa_status_t
2334bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2335                         wwn_t rpwwn, struct scsi_lun lun)
2336{
2337        struct bfa_lun_mask_s   *lunm_list;
2338        struct bfa_fcs_lport_s *port = NULL;
2339        int     i;
2340
2341        /* In min cfg lunm_list could be NULL, but no commands should run. */
2342        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2343                return BFA_STATUS_FAILED;
2344
2345        bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2346        bfa_trc(bfa, *pwwn);
2347        bfa_trc(bfa, rpwwn);
2348        bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
2349
2350        if (*pwwn == 0) {
2351                port = bfa_fcs_lookup_port(
2352                                &((struct bfad_s *)bfa->bfad)->bfa_fcs,
2353                                vf_id, *pwwn);
2354                if (port)
2355                        *pwwn = port->port_cfg.pwwn;
2356        }
2357
2358        lunm_list = bfa_get_lun_mask_list(bfa);
2359        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2360                if ((lunm_list[i].lp_wwn == *pwwn) &&
2361                    (lunm_list[i].rp_wwn == rpwwn) &&
2362                    (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2363                     scsilun_to_int((struct scsi_lun *)&lun))) {
2364                        lunm_list[i].lp_wwn = 0;
2365                        lunm_list[i].rp_wwn = 0;
2366                        int_to_scsilun(0, &lunm_list[i].lun);
2367                        lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
2368                        if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
2369                                lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2370                                lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2371                        }
2372                        return bfa_dconf_update(bfa);
2373                }
2374        }
2375
2376        /* set unit attention for all luns in this rp */
2377        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2378                if ((lunm_list[i].lp_wwn == *pwwn) &&
2379                    (lunm_list[i].rp_wwn == rpwwn))
2380                        lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2381        }
2382
2383        return BFA_STATUS_ENTRY_NOT_EXISTS;
2384}
2385
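/*
 * BFA callback for failed IO requests; completes the IO to the driver
 * with aborted status.
 */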
2386static void
2387__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2388{
2389        struct bfa_ioim_s *ioim = cbarg;
2390
2391        if (!complete) {
2392                bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2393                return;
2394        }
2395
2396        bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2397                          0, 0, NULL, 0);
2398}
2399
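/*
 * BFA callback for IO requests failed due to path TOV expiry.
 */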
2400static void
2401__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2402{
2403        struct bfa_ioim_s *ioim = cbarg;
2404
2405        bfa_stats(ioim->itnim, path_tov_expired);
2406        if (!complete) {
2407                bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2408                return;
2409        }
2410
2411        bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2412                          0, 0, NULL, 0);
2413}
2414
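/*
 * BFA callback for aborted IO requests.
 */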
2415static void
2416__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2417{
2418        struct bfa_ioim_s *ioim = cbarg;
2419
2420        if (!complete) {
2421                bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2422                return;
2423        }
2424
2425        bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2426}
2427
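/*
 * SG page allocation completed; resume the IO state machine.
 */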
2428static void
2429bfa_ioim_sgpg_alloced(void *cbarg)
2430{
2431        struct bfa_ioim_s *ioim = cbarg;
2432
2433        ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2434        list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
2435        ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2436        bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2437}
2438
2439/*
2440 * Send I/O request to firmware.
2441 */
2442static  bfa_boolean_t
2443bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2444{
2445        struct bfa_itnim_s *itnim = ioim->itnim;
2446        struct bfi_ioim_req_s *m;
2447        static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
2448        struct bfi_sge_s *sge, *sgpge;
2449        u32     pgdlen = 0;
2450        u32     fcp_dl;
2451        u64 addr;
2452        struct scatterlist *sg;
2453        struct bfa_sgpg_s *sgpg;
2454        struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2455        u32 i, sge_id, pgcumsz;
2456        enum dma_data_direction dmadir;
2457
2458        /*
2459         * check for room in queue to send request now
2460         */
2461        m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2462        if (!m) {
2463                bfa_stats(ioim->itnim, qwait);
2464                bfa_reqq_wait(ioim->bfa, ioim->reqq,
2465                                  &ioim->iosp->reqq_wait);
2466                return BFA_FALSE;
2467        }
2468
2469        /*
2470         * build i/o request message next
2471         */
2472        m->io_tag = cpu_to_be16(ioim->iotag);
2473        m->rport_hdl = ioim->itnim->rport->fw_handle;
2474        m->io_timeout = 0;
2475
2476        sge = &m->sges[0];
2477        sgpg = ioim->sgpg;
2478        sge_id = 0;
2479        sgpge = NULL;
2480        pgcumsz = 0;
2481        scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
2482                if (i == 0) {
2483                        /* build inline IO SG element */
2484                        addr = bfa_sgaddr_le(sg_dma_address(sg));
2485                        sge->sga = *(union bfi_addr_u *) &addr;
2486                        pgdlen = sg_dma_len(sg);
2487                        sge->sg_len = pgdlen;
2488                        sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
2489                                        BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
2490                        bfa_sge_to_be(sge);
2491                        sge++;
2492                } else {
2493                        if (sge_id == 0)
2494                                sgpge = sgpg->sgpg->sges;
2495
2496                        addr = bfa_sgaddr_le(sg_dma_address(sg));
2497                        sgpge->sga = *(union bfi_addr_u *) &addr;
2498                        sgpge->sg_len = sg_dma_len(sg);
2499                        pgcumsz += sgpge->sg_len;
2500
2501                        /* set flags */
2502                        if (i < (ioim->nsges - 1) &&
2503                                        sge_id < (BFI_SGPG_DATA_SGES - 1))
2504                                sgpge->flags = BFI_SGE_DATA;
2505                        else if (i < (ioim->nsges - 1))
2506                                sgpge->flags = BFI_SGE_DATA_CPL;
2507                        else
2508                                sgpge->flags = BFI_SGE_DATA_LAST;
2509
2510                        bfa_sge_to_le(sgpge);
2511
2512                        sgpge++;
2513                        if (i == (ioim->nsges - 1)) {
2514                                sgpge->flags = BFI_SGE_PGDLEN;
2515                                sgpge->sga.a32.addr_lo = 0;
2516                                sgpge->sga.a32.addr_hi = 0;
2517                                sgpge->sg_len = pgcumsz;
2518                                bfa_sge_to_le(sgpge);
2519                        } else if (++sge_id == BFI_SGPG_DATA_SGES) {
2520                                sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2521                                sgpge->flags = BFI_SGE_LINK;
2522                                sgpge->sga = sgpg->sgpg_pa;
2523                                sgpge->sg_len = pgcumsz;
2524                                bfa_sge_to_le(sgpge);
2525                                sge_id = 0;
2526                                pgcumsz = 0;
2527                        }
2528                }
2529        }
2530
2531        if (ioim->nsges > BFI_SGE_INLINE) {
2532                sge->sga = ioim->sgpg->sgpg_pa;
2533        } else {
2534                sge->sga.a32.addr_lo = 0;
2535                sge->sga.a32.addr_hi = 0;
2536        }
2537        sge->sg_len = pgdlen;
2538        sge->flags = BFI_SGE_PGDLEN;
2539        bfa_sge_to_be(sge);
2540
2541        /*
2542         * set up I/O command parameters
2543         */
2544        m->cmnd = cmnd_z0;
2545        int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
2546        dmadir = cmnd->sc_data_direction;
2547        if (dmadir == DMA_TO_DEVICE)
2548                m->cmnd.iodir = FCP_IODIR_WRITE;
2549        else if (dmadir == DMA_FROM_DEVICE)
2550                m->cmnd.iodir = FCP_IODIR_READ;
2551        else
2552                m->cmnd.iodir = FCP_IODIR_NONE;
2553
2554        m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
2555        fcp_dl = scsi_bufflen(cmnd);
2556        m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
2557
2558        /*
2559         * set up I/O message header
2560         */
2561        switch (m->cmnd.iodir) {
2562        case FCP_IODIR_READ:
2563                bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
2564                bfa_stats(itnim, input_reqs);
2565                ioim->itnim->stats.rd_throughput += fcp_dl;
2566                break;
2567        case FCP_IODIR_WRITE:
2568                bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
2569                bfa_stats(itnim, output_reqs);
2570                ioim->itnim->stats.wr_throughput += fcp_dl;
2571                break;
2572        case FCP_IODIR_RW:
2573                bfa_stats(itnim, input_reqs);
2574                bfa_stats(itnim, output_reqs);
2575                fallthrough;
2576        default:
2577                bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2578        }
2579        if (itnim->seq_rec ||
2580            (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
2581                bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2582
2583        /*
2584         * queue I/O message to firmware
2585         */
2586        bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2587        return BFA_TRUE;
2588}
2589
2590/*
2591 * Set up any additional SG pages needed. The inline SG element is set
2592 * up at queuing time.
2593 */
2594static bfa_boolean_t
2595bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
2596{
2597        u16     nsgpgs;
2598
2599        WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
2600
2601        /*
2602         * allocate SG pages needed
2603         */
2604        nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2605        if (!nsgpgs)
2606                return BFA_TRUE;
2607
2608        if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
2609            != BFA_STATUS_OK) {
2610                bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2611                return BFA_FALSE;
2612        }
2613
2614        ioim->nsgpgs = nsgpgs;
2615        ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2616
2617        return BFA_TRUE;
2618}
2619
2620/*
2621 * Send I/O abort request to firmware.
2622 */
2623static  bfa_boolean_t
2624bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2625{
2626        struct bfi_ioim_abort_req_s *m;
2627        enum bfi_ioim_h2i       msgop;
2628
2629        /*
2630         * check for room in queue to send request now
2631         */
2632        m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2633        if (!m)
2634                return BFA_FALSE;
2635
2636        /*
2637         * build IO abort request message next
2638         */
2639        if (ioim->iosp->abort_explicit)
2640                msgop = BFI_IOIM_H2I_IOABORT_REQ;
2641        else
2642                msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2643
2644        bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
2645        m->io_tag    = cpu_to_be16(ioim->iotag);
2646        m->abort_tag = ++ioim->abort_tag;
2647
2648        /*
2649         * queue I/O message to firmware
2650         */
2651        bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2652        return BFA_TRUE;
2653}
2654
2655/*
2656 * Call to resume any I/O requests waiting for room in request queue.
2657 */
2658static void
2659bfa_ioim_qresume(void *cbarg)
2660{
2661        struct bfa_ioim_s *ioim = cbarg;
2662
2663        bfa_stats(ioim->itnim, qresumes);
2664        bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2665}
2666
2667
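/*
 * IO cleanup has completed for this IO; move the IO to the fcpim global
 * queue and notify the waiting itnim or TM command.
 */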
2668static void
2669bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2670{
2671        /*
2672         * Move IO from itnim queue to fcpim global queue since itnim will be
2673         * freed.
2674         */
2675        list_del(&ioim->qe);
2676        list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2677
2678        if (!ioim->iosp->tskim) {
2679                if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
2680                        bfa_cb_dequeue(&ioim->hcb_qe);
2681                        list_del(&ioim->qe);
2682                        list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
2683                }
2684                bfa_itnim_iodone(ioim->itnim);
2685        } else
2686                bfa_wc_down(&ioim->iosp->tskim->wc);
2687}
2688
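/*
 * Return true if a driver abort can be issued for the IO in its
 * current state.
 */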
2689static bfa_boolean_t
2690bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2691{
2692        if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2693            (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)))    ||
2694            (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort))         ||
2695            (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull))   ||
2696            (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb))           ||
2697            (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free))      ||
2698            (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2699                return BFA_FALSE;
2700
2701        return BFA_TRUE;
2702}
2703
2704void
2705bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2706{
2707        /*
2708         * If the path tov timer expired, fail back with PATHTOV status -
2709         * these IO requests are not normally retried by the IO stack.
2710         *
2711         * Otherwise the device came back online; fail the IO with normal
2712         * failed status so that the IO stack retries these IO requests.
2713         */
2714        if (iotov)
2715                ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2716        else {
2717                ioim->io_cbfn = __bfa_cb_ioim_failed;
2718                bfa_stats(ioim->itnim, iocom_nexus_abort);
2719        }
2720        bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2721
2722        /*
2723         * Move IO to fcpim global queue since itnim will be
2724         * freed.
2725         */
2726        list_del(&ioim->qe);
2727        list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2728}
2729
2730
2731/*
2732 * Memory allocation and initialization.
2733 */
2734void
2735bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
2736{
2737        struct bfa_ioim_s               *ioim;
2738        struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
2739        struct bfa_ioim_sp_s    *iosp;
2740        u16             i;
2741
2742        /*
2743         * claim memory first
2744         */
2745        ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
2746        fcpim->ioim_arr = ioim;
2747        bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
2748
2749        iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
2750        fcpim->ioim_sp_arr = iosp;
2751        bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
2752
2753        /*
2754         * Initialize ioim free queues
2755         */
2756        INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
2757        INIT_LIST_HEAD(&fcpim->ioim_comp_q);
2758
2759        for (i = 0; i < fcpim->fcp->num_ioim_reqs;
2760             i++, ioim++, iosp++) {
2761                /*
2762                 * initialize IOIM
2763                 */
2764                memset(ioim, 0, sizeof(struct bfa_ioim_s));
2765                ioim->iotag   = i;
2766                ioim->bfa     = fcpim->bfa;
2767                ioim->fcpim   = fcpim;
2768                ioim->iosp    = iosp;
2769                INIT_LIST_HEAD(&ioim->sgpg_q);
2770                bfa_reqq_winit(&ioim->iosp->reqq_wait,
2771                                   bfa_ioim_qresume, ioim);
2772                bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
2773                                   bfa_ioim_sgpg_alloced, ioim);
2774                bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2775        }
2776}
2777
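/*
 * Handle IO completion responses from firmware and feed the
 * corresponding event to the IO state machine.
 */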
2778void
2779bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2780{
2781        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2782        struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2783        struct bfa_ioim_s *ioim;
2784        u16     iotag;
2785        enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2786
2787        iotag = be16_to_cpu(rsp->io_tag);
2788
2789        ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2790        WARN_ON(ioim->iotag != iotag);
2791
2792        bfa_trc(ioim->bfa, ioim->iotag);
2793        bfa_trc(ioim->bfa, rsp->io_status);
2794        bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2795
2796        if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
2797                ioim->iosp->comp_rspmsg = *m;
2798
2799        switch (rsp->io_status) {
2800        case BFI_IOIM_STS_OK:
2801                bfa_stats(ioim->itnim, iocomp_ok);
2802                if (rsp->reuse_io_tag == 0)
2803                        evt = BFA_IOIM_SM_DONE;
2804                else
2805                        evt = BFA_IOIM_SM_COMP;
2806                break;
2807
2808        case BFI_IOIM_STS_TIMEDOUT:
2809                bfa_stats(ioim->itnim, iocomp_timedout);
2810                fallthrough;
2811        case BFI_IOIM_STS_ABORTED:
2812                rsp->io_status = BFI_IOIM_STS_ABORTED;
2813                bfa_stats(ioim->itnim, iocomp_aborted);
2814                if (rsp->reuse_io_tag == 0)
2815                        evt = BFA_IOIM_SM_DONE;
2816                else
2817                        evt = BFA_IOIM_SM_COMP;
2818                break;
2819
2820        case BFI_IOIM_STS_PROTO_ERR:
2821                bfa_stats(ioim->itnim, iocom_proto_err);
2822                WARN_ON(!rsp->reuse_io_tag);
2823                evt = BFA_IOIM_SM_COMP;
2824                break;
2825
2826        case BFI_IOIM_STS_SQER_NEEDED:
2827                bfa_stats(ioim->itnim, iocom_sqer_needed);
2828                WARN_ON(rsp->reuse_io_tag != 0);
2829                evt = BFA_IOIM_SM_SQRETRY;
2830                break;
2831
2832        case BFI_IOIM_STS_RES_FREE:
2833                bfa_stats(ioim->itnim, iocom_res_free);
2834                evt = BFA_IOIM_SM_FREE;
2835                break;
2836
2837        case BFI_IOIM_STS_HOST_ABORTED:
2838                bfa_stats(ioim->itnim, iocom_hostabrts);
2839                if (rsp->abort_tag != ioim->abort_tag) {
2840                        bfa_trc(ioim->bfa, rsp->abort_tag);
2841                        bfa_trc(ioim->bfa, ioim->abort_tag);
2842                        return;
2843                }
2844
2845                if (rsp->reuse_io_tag)
2846                        evt = BFA_IOIM_SM_ABORT_COMP;
2847                else
2848                        evt = BFA_IOIM_SM_ABORT_DONE;
2849                break;
2850
2851        case BFI_IOIM_STS_UTAG:
2852                bfa_stats(ioim->itnim, iocom_utags);
2853                evt = BFA_IOIM_SM_COMP_UTAG;
2854                break;
2855
2856        default:
2857                WARN_ON(1);
2858        }
2859
2860        bfa_sm_send_event(ioim, evt);
2861}
2862
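/*
 * Handle good IO completion responses from firmware.
 */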
2863void
2864bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2865{
2866        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2867        struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2868        struct bfa_ioim_s *ioim;
2869        u16     iotag;
2870
2871        iotag = be16_to_cpu(rsp->io_tag);
2872
2873        ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2874        WARN_ON(ioim->iotag != iotag);
2875
2876        bfa_ioim_cb_profile_comp(fcpim, ioim);
2877
2878        bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2879}
2880
2881/*
2882 * Called by itnim to clean up IO while going offline.
2883 */
2884void
2885bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
2886{
2887        bfa_trc(ioim->bfa, ioim->iotag);
2888        bfa_stats(ioim->itnim, io_cleanups);
2889
2890        ioim->iosp->tskim = NULL;
2891        bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2892}
2893
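/*
 * Called to clean up an IO on behalf of a task management command;
 * the TM command is notified when the cleanup completes.
 */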
2894void
2895bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
2896{
2897        bfa_trc(ioim->bfa, ioim->iotag);
2898        bfa_stats(ioim->itnim, io_tmaborts);
2899
2900        ioim->iosp->tskim = tskim;
2901        bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2902}
2903
2904/*
2905 * IOC failure handling.
2906 */
2907void
2908bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
2909{
2910        bfa_trc(ioim->bfa, ioim->iotag);
2911        bfa_stats(ioim->itnim, io_iocdowns);
2912        bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
2913}
2914
2915/*
2916 * IO offline TOV popped. Fail the pending IO.
2917 */
2918void
2919bfa_ioim_tov(struct bfa_ioim_s *ioim)
2920{
2921        bfa_trc(ioim->bfa, ioim->iotag);
2922        bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
2923}
2924
2925
2926/*
2927 * Allocate IOIM resource for initiator mode I/O request.
2928 */
2929struct bfa_ioim_s *
2930bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2931                struct bfa_itnim_s *itnim, u16 nsges)
2932{
2933        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2934        struct bfa_ioim_s *ioim;
2935        struct bfa_iotag_s *iotag = NULL;
2936
2937        /*
2938         * allocate IOIM resource
2939         */
2940        bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
2941        if (!iotag) {
2942                bfa_stats(itnim, no_iotags);
2943                return NULL;
2944        }
2945
2946        ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
2947
2948        ioim->dio = dio;
2949        ioim->itnim = itnim;
2950        ioim->nsges = nsges;
2951        ioim->nsgpgs = 0;
2952
2953        bfa_stats(itnim, total_ios);
2954        fcpim->ios_active++;
2955
2956        list_add_tail(&ioim->qe, &itnim->io_q);
2957
2958        return ioim;
2959}
2960
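/*
 * Free an IOIM resource; return the IO tag to the appropriate free list.
 */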
2961void
2962bfa_ioim_free(struct bfa_ioim_s *ioim)
2963{
2964        struct bfa_fcpim_s *fcpim = ioim->fcpim;
2965        struct bfa_iotag_s *iotag;
2966
2967        if (ioim->nsgpgs > 0)
2968                bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
2969
2970        bfa_stats(ioim->itnim, io_comps);
2971        fcpim->ios_active--;
2972
2973        ioim->iotag &= BFA_IOIM_IOTAG_MASK;
2974
2975        WARN_ON(!(ioim->iotag <
2976                (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
2977        iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
2978
2979        if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
2980                list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
2981        else
2982                list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
2983
2984        list_del(&ioim->qe);
2985}
2986
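/*
 * Start IO processing by sending a start event to the IO state machine.
 */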
2987void
2988bfa_ioim_start(struct bfa_ioim_s *ioim)
2989{
2990        bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
2991
2992        /*
2993         * Obtain the queue over which this request has to be issued
2994         */
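        /* BFA_FALSE evaluates to 0, i.e. request queue 0 is used */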
2995        ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
2996                        BFA_FALSE : bfa_itnim_get_reqq(ioim);
2997
2998        bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
2999}
3000
3001/*
3002 * Driver I/O abort request.
3003 */
3004bfa_status_t
3005bfa_ioim_abort(struct bfa_ioim_s *ioim)
3006{
3008        bfa_trc(ioim->bfa, ioim->iotag);
3009
3010        if (!bfa_ioim_is_abortable(ioim))
3011                return BFA_STATUS_FAILED;
3012
3013        bfa_stats(ioim->itnim, io_aborts);
3014        bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
3015
3016        return BFA_STATUS_OK;
3017}
3018
3019/*
3020 *  BFA TSKIM state machine functions
3021 */
3022
3023/*
3024 * Task management command beginning state.
3025 */
3026static void
3027bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3028{
3029        bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3030
3031        switch (event) {
3032        case BFA_TSKIM_SM_START:
3033                bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3034                bfa_tskim_gather_ios(tskim);
3035
3036                /*
3037                 * If device is offline, do not send TM on wire. Just cleanup
3038                 * any pending IO requests and complete TM request.
3039                 */
3040                if (!bfa_itnim_is_online(tskim->itnim)) {
3041                        bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3042                        tskim->tsk_status = BFI_TSKIM_STS_OK;
3043                        bfa_tskim_cleanup_ios(tskim);
3044                        return;
3045                }
3046
3047                if (!bfa_tskim_send(tskim)) {
3048                        bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
3049                        bfa_stats(tskim->itnim, tm_qwait);
3050                        bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3051                                          &tskim->reqq_wait);
3052                }
3053                break;
3054
3055        default:
3056                bfa_sm_fault(tskim->bfa, event);
3057        }
3058}
3059
3060/*
3061 * TM command is active, awaiting completion from firmware to
3062 * clean up IO requests in TM scope.
3063 */
3064static void
3065bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3066{
3067        bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3068
3069        switch (event) {
3070        case BFA_TSKIM_SM_DONE:
3071                bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3072                bfa_tskim_cleanup_ios(tskim);
3073                break;
3074
3075        case BFA_TSKIM_SM_CLEANUP:
3076                bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3077                if (!bfa_tskim_send_abort(tskim)) {
3078                        bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
3079                        bfa_stats(tskim->itnim, tm_qwait);
3080                        bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3081                                &tskim->reqq_wait);
3082                }
3083                break;
3084
3085        case BFA_TSKIM_SM_HWFAIL:
3086                bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3087                bfa_tskim_iocdisable_ios(tskim);
3088                bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3089                break;
3090
3091        default:
3092                bfa_sm_fault(tskim->bfa, event);
3093        }
3094}
3095
3096/*
3097 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3098 * completion event from firmware.
3099 */
3100static void
3101bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3102{
3103        bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3104
3105        switch (event) {
3106        case BFA_TSKIM_SM_DONE:
3107                /*
3108                 * Ignore and wait for ABORT completion from firmware.
3109                 */
3110                break;
3111
3112        case BFA_TSKIM_SM_UTAG:
3113        case BFA_TSKIM_SM_CLEANUP_DONE:
3114                bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3115                bfa_tskim_cleanup_ios(tskim);
3116                break;
3117
3118        case BFA_TSKIM_SM_HWFAIL:
3119                bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3120                bfa_tskim_iocdisable_ios(tskim);
3121                bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3122                break;
3123
3124        default:
3125                bfa_sm_fault(tskim->bfa, event);
3126        }
3127}
3128
3129static void
3130bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3131{
3132        bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3133
3134        switch (event) {
3135        case BFA_TSKIM_SM_IOS_DONE:
3136                bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3137                bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
3138                break;
3139
3140        case BFA_TSKIM_SM_CLEANUP:
3141                /*
3142                 * Ignore, TM command completed on wire.
3143                 * Notify TM completion on IO cleanup completion.
3144                 */
3145                break;
3146
3147        case BFA_TSKIM_SM_HWFAIL:
3148                bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3149                bfa_tskim_iocdisable_ios(tskim);
3150                bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3151                break;
3152
3153        default:
3154                bfa_sm_fault(tskim->bfa, event);
3155        }
3156}
3157
3158/*
3159 * Task management command is waiting for room in request CQ
3160 */
3161static void
3162bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3163{
3164        bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3165
3166        switch (event) {
3167        case BFA_TSKIM_SM_QRESUME:
3168                bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3169                bfa_tskim_send(tskim);
3170                break;
3171
3172        case BFA_TSKIM_SM_CLEANUP:
3173                /*
3174                 * No need to send TM on wire since ITN is offline.
3175                 */
3176                bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3177                bfa_reqq_wcancel(&tskim->reqq_wait);
3178                bfa_tskim_cleanup_ios(tskim);
3179                break;
3180
3181        case BFA_TSKIM_SM_HWFAIL:
3182                bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3183                bfa_reqq_wcancel(&tskim->reqq_wait);
3184                bfa_tskim_iocdisable_ios(tskim);
3185                bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3186                break;
3187
3188        default:
3189                bfa_sm_fault(tskim->bfa, event);
3190        }
3191}
3192
3193/*
3194 * Task management command is active, awaiting room in the request CQ
3195 * to send the cleanup request.
3196 */
3197static void
3198bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3199                enum bfa_tskim_event event)
3200{
3201        bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3202
3203        switch (event) {
3204        case BFA_TSKIM_SM_DONE:
3205                bfa_reqq_wcancel(&tskim->reqq_wait);
3206                fallthrough;
3207        case BFA_TSKIM_SM_QRESUME:
3208                bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3209                bfa_tskim_send_abort(tskim);
3210                break;
3211
3212        case BFA_TSKIM_SM_HWFAIL:
3213                bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3214                bfa_reqq_wcancel(&tskim->reqq_wait);
3215                bfa_tskim_iocdisable_ios(tskim);
3216                bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3217                break;
3218
3219        default:
3220                bfa_sm_fault(tskim->bfa, event);
3221        }
3222}
3223
3224/*
3225 * BFA callback is pending
3226 */
3227static void
3228bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3229{
3230        bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3231
3232        switch (event) {
3233        case BFA_TSKIM_SM_HCB:
3234                bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3235                bfa_tskim_free(tskim);
3236                break;
3237
3238        case BFA_TSKIM_SM_CLEANUP:
3239                bfa_tskim_notify_comp(tskim);
3240                break;
3241
3242        case BFA_TSKIM_SM_HWFAIL:
3243                break;
3244
3245        default:
3246                bfa_sm_fault(tskim->bfa, event);
3247        }
3248}
3249
3250static void
3251__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3252{
3253        struct bfa_tskim_s *tskim = cbarg;
3254
3255        if (!complete) {
3256                bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3257                return;
3258        }
3259
3260        bfa_stats(tskim->itnim, tm_success);
3261        bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3262}
3263
3264static void
3265__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3266{
3267        struct bfa_tskim_s *tskim = cbarg;
3268
3269        if (!complete) {
3270                bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3271                return;
3272        }
3273
3274        bfa_stats(tskim->itnim, tm_failures);
3275        bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3276                                BFI_TSKIM_STS_FAILED);
3277}
3278
3279static bfa_boolean_t
3280bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
3281{
3282        switch (tskim->tm_cmnd) {
3283        case FCP_TM_TARGET_RESET:
3284                return BFA_TRUE;
3285
3286        case FCP_TM_ABORT_TASK_SET:
3287        case FCP_TM_CLEAR_TASK_SET:
3288        case FCP_TM_LUN_RESET:
3289        case FCP_TM_CLEAR_ACA:
3290                return !memcmp(&tskim->lun, &lun, sizeof(lun));
3291
3292        default:
3293                WARN_ON(1);
3294        }
3295
3296        return BFA_FALSE;
3297}
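
/*
 * Illustrative sketch (editor's addition, not driver code): building the
 * scsi_lun key used for scope matching from a numeric LUN, mirroring what
 * bfa_tskim_gather_ios() does for each queued I/O.
 */
static inline bfa_boolean_t
example_lun_in_scope(struct bfa_tskim_s *tskim, u64 lun)
{
        struct scsi_lun scsilun;

        int_to_scsilun(lun, &scsilun);
        return bfa_tskim_match_scope(tskim, scsilun);
}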
3298
3299/*
3300 * Gather IO requests affected by the task management command.
3301 */
3302static void
3303bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3304{
3305        struct bfa_itnim_s *itnim = tskim->itnim;
3306        struct bfa_ioim_s *ioim;
3307        struct list_head *qe, *qen;
3308        struct scsi_cmnd *cmnd;
3309        struct scsi_lun scsilun;
3310
3311        INIT_LIST_HEAD(&tskim->io_q);
3312
3313        /*
3314         * Gather any active IO requests first.
3315         */
3316        list_for_each_safe(qe, qen, &itnim->io_q) {
3317                ioim = (struct bfa_ioim_s *) qe;
3318                cmnd = (struct scsi_cmnd *) ioim->dio;
3319                int_to_scsilun(cmnd->device->lun, &scsilun);
3320                if (bfa_tskim_match_scope(tskim, scsilun)) {
3321                        list_del(&ioim->qe);
3322                        list_add_tail(&ioim->qe, &tskim->io_q);
3323                }
3324        }
3325
3326        /*
3327         * Fail back any pending IO requests immediately.
3328         */
3329        list_for_each_safe(qe, qen, &itnim->pending_q) {
3330                ioim = (struct bfa_ioim_s *) qe;
3331                cmnd = (struct scsi_cmnd *) ioim->dio;
3332                int_to_scsilun(cmnd->device->lun, &scsilun);
3333                if (bfa_tskim_match_scope(tskim, scsilun)) {
3334                        list_del(&ioim->qe);
3335                        list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3336                        bfa_ioim_tov(ioim);
3337                }
3338        }
3339}
3340
3341/*
3342 * IO cleanup completion
3343 */
3344static void
3345bfa_tskim_cleanup_comp(void *tskim_cbarg)
3346{
3347        struct bfa_tskim_s *tskim = tskim_cbarg;
3348
3349        bfa_stats(tskim->itnim, tm_io_comps);
3350        bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3351}
3352
3353/*
3354 * Clean up the IO requests gathered within the scope of this TM command.
3355 */
3356static void
3357bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3358{
3359        struct bfa_ioim_s *ioim;
3360        struct list_head        *qe, *qen;
3361
3362        bfa_wc_init(&tskim->wc, bfa_tskim_cleanup_comp, tskim);
3363
3364        list_for_each_safe(qe, qen, &tskim->io_q) {
3365                ioim = (struct bfa_ioim_s *) qe;
3366                bfa_wc_up(&tskim->wc);
3367                bfa_ioim_cleanup_tm(ioim, tskim);
3368        }
3369
3370        bfa_wc_wait(&tskim->wc);
3371}
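
/*
 * Editor's note on the bfa_wc_* waiting-counter pattern above (as
 * implemented in bfa_cs.h): bfa_wc_init() records the completion callback
 * and takes one implicit reference; each bfa_wc_up() adds a reference per
 * outstanding I/O, each bfa_tskim_iodone() drops one via bfa_wc_down(),
 * and bfa_wc_wait() drops the implicit reference. bfa_tskim_cleanup_comp()
 * therefore fires exactly once, as soon as the last gathered I/O completes
 * -- immediately, if io_q was already empty.
 */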
3372
3373/*
3374 * Send task management request to firmware.
3375 */
3376static bfa_boolean_t
3377bfa_tskim_send(struct bfa_tskim_s *tskim)
3378{
3379        struct bfa_itnim_s *itnim = tskim->itnim;
3380        struct bfi_tskim_req_s *m;
3381
3382        /*
3383         * check for room in queue to send request now
3384         */
3385        m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3386        if (!m)
3387                return BFA_FALSE;
3388
3389        /*
3390         * build the task management request message next
3391         */
3392        bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3393                        bfa_fn_lpu(tskim->bfa));
3394
3395        m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3396        m->itn_fhdl = tskim->itnim->rport->fw_handle;
3397        m->t_secs = tskim->tsecs;
3398        m->lun = tskim->lun;
3399        m->tm_flags = tskim->tm_cmnd;
3400
3401        /*
3402         * queue the TM request message to firmware
3403         */
3404        bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3405        return BFA_TRUE;
3406}
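
/*
 * Editor's note: bfa_reqq_next() only peeks at the next free element of
 * the request queue (returning NULL when the queue is full); the element
 * is not consumed until bfa_reqq_produce() advances the producer index
 * and rings the doorbell. Returning BFA_FALSE here lets the state machine
 * park on bfa_reqq_wait() until space frees up.
 */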
3407
3408/*
3409 * Send abort request to cleanup an active TM to firmware.
3410 */
3411static bfa_boolean_t
3412bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3413{
3414        struct bfa_itnim_s      *itnim = tskim->itnim;
3415        struct bfi_tskim_abortreq_s     *m;
3416
3417        /*
3418         * check for room in queue to send request now
3419         */
3420        m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3421        if (!m)
3422                return BFA_FALSE;
3423
3424        /*
3425         * build the TM abort request message next
3426         */
3427        bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3428                        bfa_fn_lpu(tskim->bfa));
3429
3430        m->tsk_tag  = cpu_to_be16(tskim->tsk_tag);
3431
3432        /*
3433         * queue the TM abort message to firmware
3434         */
3435        bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3436        return BFA_TRUE;
3437}
3438
3439/*
3440 * Called to resume a task management command waiting for room in the request queue.
3441 */
3442static void
3443bfa_tskim_qresume(void *cbarg)
3444{
3445        struct bfa_tskim_s *tskim = cbarg;
3446
3447        bfa_stats(tskim->itnim, tm_qresumes);
3448        bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3449}
3450
3451/*
3452 * Clean up IOs associated with a task management command on IOC failure.
3453 */
3454static void
3455bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3456{
3457        struct bfa_ioim_s *ioim;
3458        struct list_head        *qe, *qen;
3459
3460        list_for_each_safe(qe, qen, &tskim->io_q) {
3461                ioim = (struct bfa_ioim_s *) qe;
3462                bfa_ioim_iocdisable(ioim);
3463        }
3464}
3465
3466/*
3467 * Notification of completion from a related ioim.
3468 */
3469void
3470bfa_tskim_iodone(struct bfa_tskim_s *tskim)
3471{
3472        bfa_wc_down(&tskim->wc);
3473}
3474
3475/*
3476 * Handle IOC h/w failure notification from itnim.
3477 */
3478void
3479bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
3480{
3481        tskim->notify = BFA_FALSE;
3482        bfa_stats(tskim->itnim, tm_iocdowns);
3483        bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
3484}
3485
3486/*
3487 * Clean up the TM command and associated IOs as part of ITNIM offline.
3488 */
3489void
3490bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3491{
3492        tskim->notify = BFA_TRUE;
3493        bfa_stats(tskim->itnim, tm_cleanups);
3494        bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
3495}
3496
3497/*
3498 * Memory allocation and initialization.
3499 */
3500void
3501bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
3502{
3503        struct bfa_tskim_s *tskim;
3504        struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
3505        u16     i;
3506
3507        INIT_LIST_HEAD(&fcpim->tskim_free_q);
3508        INIT_LIST_HEAD(&fcpim->tskim_unused_q);
3509
3510        tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
3511        fcpim->tskim_arr = tskim;
3512
3513        for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3514                /*
3515                 * initialize TSKIM
3516                 */
3517                memset(tskim, 0, sizeof(struct bfa_tskim_s));
3518                tskim->tsk_tag = i;
3519                tskim->bfa      = fcpim->bfa;
3520                tskim->fcpim    = fcpim;
3521                tskim->notify  = BFA_FALSE;
3522                bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3523                                        tskim);
3524                bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3525
3526                list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3527        }
3528
3529        bfa_mem_kva_curp(fcp) = (u8 *) tskim;
3530}
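
/*
 * Illustrative sketch (editor's addition): the sequential tsk_tag assigned
 * above enables O(1) lookup of a TSKIM from a firmware response tag. The
 * masking below assumes BFA_TSKIM_FROM_TAG() indexes tskim_arr modulo the
 * pool size; the WARN_ON in bfa_tskim_isr() cross-checks the result.
 */
static inline struct bfa_tskim_s *
example_tskim_from_tag(struct bfa_fcpim_s *fcpim, u16 tsk_tag)
{
        return &fcpim->tskim_arr[tsk_tag & (fcpim->num_tskim_reqs - 1)];
}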
3531
3532void
3533bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3534{
3535        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3536        struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3537        struct bfa_tskim_s *tskim;
3538        u16     tsk_tag = be16_to_cpu(rsp->tsk_tag);
3539
3540        tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
3541        WARN_ON(tskim->tsk_tag != tsk_tag);
3542
3543        tskim->tsk_status = rsp->tsk_status;
3544
3545        /*
3546         * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3547         * requests. All other statuses are for normal completions.
3548         */
3549        if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3550                bfa_stats(tskim->itnim, tm_cleanup_comps);
3551                bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3552        } else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) {
3553                bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG);
3554        } else {
3555                bfa_stats(tskim->itnim, tm_fw_rsps);
3556                bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
3557        }
3558}
3559
3560
3561struct bfa_tskim_s *
3562bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3563{
3564        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3565        struct bfa_tskim_s *tskim;
3566
3567        bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3568
3569        if (tskim)
3570                tskim->dtsk = dtsk;
3571
3572        return tskim;
3573}
3574
3575void
3576bfa_tskim_free(struct bfa_tskim_s *tskim)
3577{
3578        WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
3579        list_del(&tskim->qe);
3580        list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
3581}
3582
3583/*
3584 * Start a task management command.
3585 *
3586 * @param[in]   tskim   BFA task management command instance
3587 * @param[in]   itnim   i-t nexus for the task management command
3588 * @param[in]   lun     LUN, if applicable
3589 * @param[in]   tm_cmnd Task management command code.
3590 * @param[in]   t_secs  Timeout in seconds
3591 *
3592 * @return None.
3593 */
3594void
3595bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
3596                        struct scsi_lun lun,
3597                        enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
3598{
3599        tskim->itnim    = itnim;
3600        tskim->lun      = lun;
3601        tskim->tm_cmnd = tm_cmnd;
3602        tskim->tsecs    = tsecs;
3603        tskim->notify  = BFA_FALSE;
3604        bfa_stats(itnim, tm_cmnds);
3605
3606        list_add_tail(&tskim->qe, &itnim->tsk_q);
3607        bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
3608}
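
/*
 * Illustrative usage sketch (editor's addition, not driver code): a
 * hypothetical caller issuing a LUN reset with a 10-second timeout.
 */
static inline bfa_status_t
example_lun_reset(struct bfa_s *bfa, struct bfa_itnim_s *itnim,
                  struct bfad_tskim_s *dtsk, u64 lun)
{
        struct bfa_tskim_s *tskim = bfa_tskim_alloc(bfa, dtsk);
        struct scsi_lun scsilun;

        if (!tskim)
                return BFA_STATUS_FAILED;       /* free list exhausted */

        int_to_scsilun(lun, &scsilun);
        bfa_tskim_start(tskim, itnim, scsilun, FCP_TM_LUN_RESET, 10);
        return BFA_STATUS_OK;
}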
3609
3610void
3611bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
3612{
3613        struct bfa_fcpim_s      *fcpim = BFA_FCPIM(bfa);
3614        struct list_head        *qe;
3615        int     i;
3616
3617        for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
3618                bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
3619                list_add_tail(qe, &fcpim->tskim_unused_q);
3620        }
3621}
3622
3623void
3624bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
3625                struct bfa_s *bfa)
3626{
3627        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3628        struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
3629        struct bfa_mem_dma_s *seg_ptr;
3630        u16     nsegs, idx, per_seg_ios, num_io_req;
3631        u32     km_len = 0;
3632
3633        /*
3634         * Zero is an allowed config value for num_ioim_reqs and num_fwtio_reqs.
3635         * If the values are non-zero, clamp them to the supported range.
3636         */
3637        if (cfg->fwcfg.num_ioim_reqs &&
3638            cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
3639                cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
3640        else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
3641                cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
3642
3643        if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
3644                cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3645
3646        num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3647        if (num_io_req > BFA_IO_MAX) {
3648                if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
3649                        cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
3650                        cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
3651                } else if (cfg->fwcfg.num_fwtio_reqs)
3652                        cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3653                else
3654                        cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
3655        }
3656
3657        bfa_fcpim_meminfo(cfg, &km_len);
3658
3659        num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3660        km_len += num_io_req * sizeof(struct bfa_iotag_s);
3661        km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
3662
3663        /* DMA memory */
3664        nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3665        per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
3666
3667        bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3668                if (num_io_req >= per_seg_ios) {
3669                        num_io_req -= per_seg_ios;
3670                        bfa_mem_dma_setup(minfo, seg_ptr,
3671                                per_seg_ios * BFI_IOIM_SNSLEN);
3672                } else
3673                        bfa_mem_dma_setup(minfo, seg_ptr,
3674                                num_io_req * BFI_IOIM_SNSLEN);
3675        }
3676
3677        /* kva memory */
3678        bfa_mem_kva_setup(minfo, fcp_kva, km_len);
3679}
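
/*
 * Editor's worked example of the DMA sizing above: each I/O needs
 * BFI_IOIM_SNSLEN bytes of sense buffer, and at most per_seg_ios of those
 * fit in one DMA segment, so num_io_req buffers are spread across nsegs
 * segments, with only the final segment sized for the remainder.
 */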
3680
3681void
3682bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3683                struct bfa_pcidev_s *pcidev)
3684{
3685        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3686        struct bfa_mem_dma_s *seg_ptr;
3687        u16     idx, nsegs, num_io_req;
3688
3689        fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3690        fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3691        fcp->num_fwtio_reqs  = cfg->fwcfg.num_fwtio_reqs;
3692        fcp->num_itns   = cfg->fwcfg.num_rports;
3693        fcp->bfa = bfa;
3694
3695        /*
3696         * Set up the pool of snsbase addresses that is passed to firmware
3697         * as part of bfi_iocfc_cfg_s.
3698         */
3699        num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3700        nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3701
3702        bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3703
3704                if (!bfa_mem_dma_virt(seg_ptr))
3705                        break;
3706
3707                fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
3708                fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
3709                bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
3710        }
3711
3712        fcp->throttle_update_required = 1;
3713        bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
3714
3715        bfa_iotag_attach(fcp);
3716
3717        fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
3718        bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
3719                        (fcp->num_itns * sizeof(struct bfa_itn_s));
3720        memset(fcp->itn_arr, 0,
3721                        (fcp->num_itns * sizeof(struct bfa_itn_s)));
3722}
3723
3724void
3725bfa_fcp_iocdisable(struct bfa_s *bfa)
3726{
3727        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3728
3729        bfa_fcpim_iocdisable(fcp);
3730}
3731
3732void
3733bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw)
3734{
3735        struct bfa_fcp_mod_s    *mod = BFA_FCP_MOD(bfa);
3736        struct list_head        *qe;
3737        int     i;
3738
3739        /* Update the I/O throttle value only once, at driver load time */
3740        if (!mod->throttle_update_required)
3741                return;
3742
3743        for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
3744                bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
3745                list_add_tail(qe, &mod->iotag_unused_q);
3746        }
3747
3748        if (mod->num_ioim_reqs != num_ioim_fw) {
3749                bfa_trc(bfa, mod->num_ioim_reqs);
3750                bfa_trc(bfa, num_ioim_fw);
3751        }
3752
3753        mod->max_ioim_reqs = max_ioim_fw;
3754        mod->num_ioim_reqs = num_ioim_fw;
3755        mod->throttle_update_required = 0;
3756}
3757
3758void
3759bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
3760                void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
3761{
3762        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3763        struct bfa_itn_s *itn;
3764
3765        itn =  BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
3766        itn->isr = isr;
3767}
3768
3769/*
3770 * ITN interrupt processing.
3771 */
3772void
3773bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3774{
3775        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3776        union bfi_itn_i2h_msg_u msg;
3777        struct bfa_itn_s *itn;
3778
3779        msg.msg = m;
3780        itn =  BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
3781
3782        if (itn->isr)
3783                itn->isr(bfa, m);
3784        else
3785                WARN_ON(1);
3786}
3787
3788void
3789bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
3790{
3791        struct bfa_iotag_s *iotag;
3792        u16     num_io_req, i;
3793
3794        iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
3795        fcp->iotag_arr = iotag;
3796
3797        INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
3798        INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
3799        INIT_LIST_HEAD(&fcp->iotag_unused_q);
3800
3801        num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
3802        for (i = 0; i < num_io_req; i++, iotag++) {
3803                memset(iotag, 0, sizeof(struct bfa_iotag_s));
3804                iotag->tag = i;
3805                if (i < fcp->num_ioim_reqs)
3806                        list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
3807                else
3808                        list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
3809        }
3810
3811        bfa_mem_kva_curp(fcp) = (u8 *) iotag;
3812}
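
/*
 * Editor's note: the iotag pool is carved from the kva block in tag order
 * -- tags [0, num_ioim_reqs) seed the initiator-mode free list and tags
 * [num_ioim_reqs, num_io_req) seed the target-I/O free list.
 */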
3813
3814
3815/*
3816 * To send the config request, first try the throttle value from flash;
3817 * if it is 0, fall back to the driver parameter.
3818 * min(flash_val, drv_val) must be used because memory allocation
3819 * was done based on the driver-configured value.
3820 */
3821u16
3822bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param)
3823{
3824        u16 tmp;
3825        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3826
3827        /*
3828         * If a throttle value from flash is already in effect after driver
3829         * load, keep returning the current value (rather than the actual
3830         * flash value) until the next load.
3831         */
3832        if (!fcp->throttle_update_required)
3833                return (u16)fcp->num_ioim_reqs;
3834
3835        tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0;
3836        if (!tmp || (tmp > drv_cfg_param))
3837                tmp = drv_cfg_param;
3838
3839        return tmp;
3840}
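
/*
 * Illustrative sketch (editor's addition): the selection above reduces to
 * min(flash_val, drv_val), with 0 standing for "no valid flash value".
 */
static inline u16
example_throttle_pick(u16 flash_val, u16 drv_cfg_param)
{
        return (!flash_val || flash_val > drv_cfg_param) ?
                        drv_cfg_param : flash_val;
}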
3841
3842bfa_status_t
3843bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value)
3844{
3845        if (!bfa_dconf_get_min_cfg(bfa)) {
3846                BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value;
3847                BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1;
3848                return BFA_STATUS_OK;
3849        }
3850
3851        return BFA_STATUS_FAILED;
3852}
3853
3854u16
3855bfa_fcpim_read_throttle(struct bfa_s *bfa)
3856{
3857        struct bfa_throttle_cfg_s *throttle_cfg =
3858                        &(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg);
3859
3860        return ((!bfa_dconf_get_min_cfg(bfa)) ?
3861               ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0);
3862}
3863
3864bfa_status_t
3865bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value)
3866{
3867        /* In min-cfg mode, no commands should run. */
3868        if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
3869            (!bfa_dconf_read_data_valid(bfa)))
3870                return BFA_STATUS_FAILED;
3871
3872        bfa_fcpim_write_throttle(bfa, value);
3873
3874        return bfa_dconf_update(bfa);
3875}
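
/*
 * Editor's note: bfa_fcpim_throttle_set() only records the new value in
 * the dconf shadow and asks bfa_dconf_update() to persist it to flash;
 * the in-effect limit (fcp->num_ioim_reqs) is unchanged until the next
 * driver load, consistent with the single-update rule in
 * bfa_fcp_res_recfg().
 */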
3876
3877bfa_status_t
3878bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf)
3879{
3880        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3881        struct bfa_defs_fcpim_throttle_s throttle;
3882
3883        if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
3884            (!bfa_dconf_read_data_valid(bfa)))
3885                return BFA_STATUS_FAILED;
3886
3887        memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s));
3888
3889        throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs);
3890        throttle.cfg_value = bfa_fcpim_read_throttle(bfa);
3891        if (!throttle.cfg_value)
3892                throttle.cfg_value = throttle.cur_value;
3893        throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs);
3894        memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s));
3895
3896        return BFA_STATUS_OK;
3897}
3898