linux/drivers/scsi/bfa/bfa_fcpim.c
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);

/*
 *  BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
static void bfa_ioim_lm_init(struct bfa_s *bfa);

#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
        (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
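/*
 * Note: masking the tag with (num_itnims - 1) is a cheap modulo only
 * when num_itnims is a power of two; for other sizes the mask merely
 * bounds the array index.
 */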

#define bfa_fcpim_additn(__itnim)                                       \
        list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
#define bfa_fcpim_delitn(__itnim)       do {                            \
        WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));   \
        bfa_itnim_update_del_itn_stats(__itnim);      \
        list_del(&(__itnim)->qe);      \
        WARN_ON(!list_empty(&(__itnim)->io_q));                         \
        WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));                 \
        WARN_ON(!list_empty(&(__itnim)->pending_q));                    \
} while (0)

#define bfa_itnim_online_cb(__itnim) do {                               \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_online((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_online, (__itnim));      \
        }                                                               \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do {                              \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_offline((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_offline, (__itnim));      \
        }                                                               \
} while (0)

#define bfa_itnim_sler_cb(__itnim) do {                                 \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_sler((__itnim)->ditn);      \
        else {                                                          \
                bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,        \
                __bfa_cb_itnim_sler, (__itnim));      \
        }                                                               \
} while (0)

enum bfa_ioim_lm_ua_status {
        BFA_IOIM_LM_UA_RESET = 0,
        BFA_IOIM_LM_UA_SET = 1,
};

/*
 *  itnim state machine event
 */
enum bfa_itnim_event {
        BFA_ITNIM_SM_CREATE = 1,        /*  itnim is created */
        BFA_ITNIM_SM_ONLINE = 2,        /*  itnim is online */
        BFA_ITNIM_SM_OFFLINE = 3,       /*  itnim is offline */
        BFA_ITNIM_SM_FWRSP = 4,         /*  firmware response */
        BFA_ITNIM_SM_DELETE = 5,        /*  deleting an existing itnim */
        BFA_ITNIM_SM_CLEANUP = 6,       /*  IO cleanup completion */
        BFA_ITNIM_SM_SLER = 7,          /*  second level error recovery */
        BFA_ITNIM_SM_HWFAIL = 8,        /*  IOC h/w failure event */
        BFA_ITNIM_SM_QRESUME = 9,       /*  queue space available */
};

/*
 *  BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do {                            \
        list_del(&(__ioim)->qe);                                        \
        list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);    \
} while (0)


#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {                  \
        if ((__fcpim)->profile_comp)                                    \
                (__fcpim)->profile_comp(__ioim);                        \
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {                 \
        if ((__fcpim)->profile_start)                                   \
                (__fcpim)->profile_start(__ioim);                       \
} while (0)

/*
 * IO state machine events
 */
enum bfa_ioim_event {
        BFA_IOIM_SM_START       = 1,    /*  io start request from host */
        BFA_IOIM_SM_COMP_GOOD   = 2,    /*  io good comp, resource free */
        BFA_IOIM_SM_COMP        = 3,    /*  io comp, resource is free */
        BFA_IOIM_SM_COMP_UTAG   = 4,    /*  io comp, unknown tag, resource free */
        BFA_IOIM_SM_DONE        = 5,    /*  io comp, resource not free */
        BFA_IOIM_SM_FREE        = 6,    /*  io resource is freed */
        BFA_IOIM_SM_ABORT       = 7,    /*  abort request from scsi stack */
        BFA_IOIM_SM_ABORT_COMP  = 8,    /*  abort from f/w */
        BFA_IOIM_SM_ABORT_DONE  = 9,    /*  abort completion from f/w */
        BFA_IOIM_SM_QRESUME     = 10,   /*  CQ space available to queue IO */
        BFA_IOIM_SM_SGALLOCED   = 11,   /*  SG page allocation successful */
        BFA_IOIM_SM_SQRETRY     = 12,   /*  sequence recovery retry */
        BFA_IOIM_SM_HCB         = 13,   /*  bfa callback complete */
        BFA_IOIM_SM_CLEANUP     = 14,   /*  IO cleanup from itnim */
        BFA_IOIM_SM_TMSTART     = 15,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_TMDONE      = 16,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_HWFAIL      = 17,   /*  IOC h/w failure event */
        BFA_IOIM_SM_IOTOV       = 18,   /*  ITN offline TOV */
};


/*
 *  BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {                           \
        bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
        bfa_tskim_notify_comp(__tskim);      \
} while (0)

#define bfa_tskim_notify_comp(__tskim) do {                             \
        if ((__tskim)->notify)                                          \
                bfa_itnim_tskdone((__tskim)->itnim);      \
} while (0)


enum bfa_tskim_event {
        BFA_TSKIM_SM_START      = 1,    /*  TM command start            */
        BFA_TSKIM_SM_DONE       = 2,    /*  TM completion               */
        BFA_TSKIM_SM_QRESUME    = 3,    /*  resume after qfull          */
        BFA_TSKIM_SM_HWFAIL     = 5,    /*  IOC h/w failure event       */
        BFA_TSKIM_SM_HCB        = 6,    /*  BFA callback completion     */
        BFA_TSKIM_SM_IOS_DONE   = 7,    /*  IO and sub TM completions   */
        BFA_TSKIM_SM_CLEANUP    = 8,    /*  TM cleanup on ITN offline   */
        BFA_TSKIM_SM_CLEANUP_DONE = 9,  /*  TM abort completion */
        BFA_TSKIM_SM_UTAG       = 10,   /*  TM completion unknown tag  */
};

/*
 * forward declaration for BFA ITNIM functions
 */
static void     bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void     bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void     bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void     __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void     bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov(void *itnim_arg);
static void     bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void     bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/*
 * forward declaration of ITNIM state machine
 */
static void     bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
static void     bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);

/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t    bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t    bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void             bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);

/*
 * forward declaration of BFA IO state machine
 */
static void     bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
static void     bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
/*
 * forward declaration for BFA TSKIM functions
 */
static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void     __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
                                        struct scsi_lun lun);
static void     bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void     bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void     bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/*
 * forward declaration of BFA TSKIM state machine
 */
static void     bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
/*
 *  BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        bfa_itnim_meminfo(cfg, km_len);

        /*
         * IO memory
         */
        *km_len += cfg->fwcfg.num_ioim_reqs *
          (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

        /*
         * task management command memory
         */
        if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
                cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
        *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}


static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
                struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_s *bfa = fcp->bfa;

        bfa_trc(bfa, cfg->drvcfg.path_tov);
        bfa_trc(bfa, cfg->fwcfg.num_rports);
        bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
        bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

        fcpim->fcp              = fcp;
        fcpim->bfa              = bfa;
        fcpim->num_itnims       = cfg->fwcfg.num_rports;
        fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
        fcpim->path_tov         = cfg->drvcfg.path_tov;
        fcpim->delay_comp       = cfg->drvcfg.delay_comp;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;

        bfa_itnim_attach(fcpim);
        bfa_tskim_attach(fcpim);
        bfa_ioim_attach(fcpim);
}

static void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
        struct bfa_fcpim_s *fcpim = &fcp->fcpim;
        struct bfa_itnim_s *itnim;
        struct list_head *qe, *qen;

        /* Enqueue unused tskim resources to free_q */
        list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);

        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_iocdisable(itnim);
        }
}

void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        fcpim->path_tov = path_tov * 1000;
        if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
                fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->path_tov / 1000;
}
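/*
 * Usage sketch (hypothetical caller, not part of this driver): the TOV
 * is passed in seconds, stored internally in milliseconds, and clamped
 * to BFA_FCPIM_PATHTOV_MAX, e.g.:
 *
 *      bfa_fcpim_path_tov_set(bfa, 30);        stores 30000 ms
 *      tov = bfa_fcpim_path_tov_get(bfa);      returns 30
 */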

#define bfa_fcpim_add_iostats(__l, __r, __stats)        \
        (__l->__stats += __r->__stats)

void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
                struct bfa_itnim_iostats_s *rstats)
{
        bfa_fcpim_add_iostats(lstats, rstats, total_ios);
        bfa_fcpim_add_iostats(lstats, rstats, qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
        bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
        bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
        bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
        bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
        bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
        bfa_fcpim_add_iostats(lstats, rstats, onlines);
        bfa_fcpim_add_iostats(lstats, rstats, offlines);
        bfa_fcpim_add_iostats(lstats, rstats, creates);
        bfa_fcpim_add_iostats(lstats, rstats, deletes);
        bfa_fcpim_add_iostats(lstats, rstats, create_comps);
        bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
        bfa_fcpim_add_iostats(lstats, rstats, sler_events);
        bfa_fcpim_add_iostats(lstats, rstats, fw_create);
        bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
        bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
        bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
        bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_success);
        bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
        bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
        bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
        bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
        bfa_fcpim_add_iostats(lstats, rstats, io_comps);
        bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
        bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}

bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa,
                struct bfa_itnim_iostats_s *stats, u8 lp_tag)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;
        struct bfa_itnim_s *itnim;

        /* accumulate IO stats from itnim */
        memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                if (itnim->rport->rport_info.lp_tag != lp_tag)
                        continue;
                bfa_fcpim_add_stats(stats, &(itnim->stats));
        }
        return BFA_STATUS_OK;
}

void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
        struct bfa_itnim_latency_s *io_lat =
                        &(ioim->itnim->ioprofile.io_latency);
        u32 val, idx;

        val = (u32)(jiffies - ioim->start_time);
        idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
        bfa_itnim_ioprofile_update(ioim->itnim, idx);

        io_lat->count[idx]++;
        io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
        io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
        io_lat->avg[idx] += val;
}
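/*
 * Note: io_lat->avg[idx] above accumulates a running sum of latencies
 * (in jiffies); presumably the consumer divides by count[idx] at
 * read-out time to obtain the actual average.
 */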

void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
        ioim->start_time = jiffies;
}

bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
{
        struct bfa_itnim_s *itnim;
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct list_head *qe, *qen;

        /* clear IO stats for all itnims */
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_clear_stats(itnim);
        }
        fcpim->io_profile = BFA_TRUE;
        fcpim->io_profile_start_time = time;
        fcpim->profile_comp = bfa_ioim_profile_comp;
        fcpim->profile_start = bfa_ioim_profile_start;
        return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_profile_off(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        fcpim->io_profile = BFA_FALSE;
        fcpim->io_profile_start_time = 0;
        fcpim->profile_comp = NULL;
        fcpim->profile_start = NULL;
        return BFA_STATUS_OK;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

        return fcpim->q_depth;
}

/*
 *  BFA ITNIM module state machine functions
 */

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CREATE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_created);
                itnim->is_online = BFA_FALSE;
                bfa_fcpim_additn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_online);
                itnim->is_online = BFA_TRUE;
                bfa_itnim_iotov_online(itnim);
                bfa_itnim_online_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                bfa_itnim_send_fwcreate(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_sler_cb(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                itnim->is_online = BFA_FALSE;
                bfa_itnim_iotov_start(itnim);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Second level error recovery is needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_OFFLINE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
                bfa_itnim_cleanup(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_cleanup(itnim);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
                                 enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
                bfa_itnim_iotov_delete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_SLER:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_CLEANUP:
                if (bfa_itnim_send_fwdelete(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_iocdisable_cleanup(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
                bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
                        enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_itnim_offline_cb(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_DELETE:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_itnim_iotov_delete(itnim);
                bfa_fcpim_delitn(itnim);
                break;

        case BFA_ITNIM_SM_OFFLINE:
                bfa_itnim_offline_cb(itnim);
                break;

        case BFA_ITNIM_SM_ONLINE:
                if (bfa_itnim_send_fwcreate(itnim))
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
                else
                        bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_FWRSP:
        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                enum bfa_itnim_event event)
{
        bfa_trc(itnim->bfa, itnim->rport->rport_tag);
        bfa_trc(itnim->bfa, event);

        switch (event) {
        case BFA_ITNIM_SM_QRESUME:
                bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
                bfa_itnim_send_fwdelete(itnim);
                break;

        case BFA_ITNIM_SM_HWFAIL:
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
                bfa_reqq_wcancel(&itnim->reqq_wait);
                bfa_fcpim_delitn(itnim);
                break;

        default:
                bfa_sm_fault(itnim->bfa, event);
        }
}

/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_tskim_s *tskim;
        struct bfa_ioim_s *ioim;
        struct list_head        *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_tskim_iocdisable(tskim);
        }

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }

        /*
         * For IO requests in the pending queue, we pretend an early timeout.
         */
        list_for_each_safe(qe, qen, &itnim->pending_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_tov(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
                ioim = (struct bfa_ioim_s *) qe;
                bfa_ioim_iocdisable(ioim);
        }
}

/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
        struct bfa_itnim_s *itnim = itnim_cbarg;

        bfa_stats(itnim, cleanup_comps);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s  *ioim;
        struct bfa_tskim_s *tskim;
        struct list_head        *qe, *qen;

        bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;

                /*
                 * Move the IO from the active queue to the cleanup queue
                 * so that a later TM will not pick up this IO.
                 */
                list_del(&ioim->qe);
                list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

                bfa_wc_up(&itnim->wc);
                bfa_ioim_cleanup(ioim);
        }

        list_for_each_safe(qe, qen, &itnim->tsk_q) {
                tskim = (struct bfa_tskim_s *) qe;
                bfa_wc_up(&itnim->wc);
                bfa_tskim_cleanup(tskim);
        }

        bfa_wc_wait(&itnim->wc);
}
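/*
 * Wait-counter pattern used above: bfa_wc_init() registers
 * bfa_itnim_cleanp_comp() and takes an initial reference, every queued
 * IO/TM takes another via bfa_wc_up(), and bfa_itnim_iodone()/
 * bfa_itnim_tskdone() release them as individual cleanups finish.
 * bfa_wc_wait() drops the initial reference, so the completion
 * callback runs only after all outstanding IOs and TMs are done.
 */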

static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_itnim_s *itnim = cbarg;

        if (complete)
                bfa_cb_itnim_sler(itnim->ditn);
}

/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
        struct bfa_itnim_s *itnim = cbarg;

        bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}

/*
 *  bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
        bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
        /*
         * ITN memory
         */
        *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}

void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
        struct bfa_s    *bfa = fcpim->bfa;
        struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
        struct bfa_itnim_s *itnim;
        int     i, j;

        INIT_LIST_HEAD(&fcpim->itnim_q);

        itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
        fcpim->itnim_arr = itnim;

        for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
                memset(itnim, 0, sizeof(struct bfa_itnim_s));
                itnim->bfa = bfa;
                itnim->fcpim = fcpim;
                itnim->reqq = BFA_REQQ_QOS_LO;
                itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
                itnim->iotov_active = BFA_FALSE;
                bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

                INIT_LIST_HEAD(&itnim->io_q);
                INIT_LIST_HEAD(&itnim->io_cleanup_q);
                INIT_LIST_HEAD(&itnim->pending_q);
                INIT_LIST_HEAD(&itnim->tsk_q);
                INIT_LIST_HEAD(&itnim->delay_comp_q);
                for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                        itnim->ioprofile.io_latency.min[j] = ~0;
                bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
        }

        bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, ioc_disabled);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}

static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_create_req_s *m;

        itnim->msg_no++;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        m->class = FC_CLASS_3;
        m->seq_rec = itnim->seq_rec;
        m->msg_no = itnim->msg_no;
        bfa_stats(itnim, fw_create);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}
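/*
 * Request-queue flow control, as used above: when bfa_reqq_next()
 * returns NULL the request queue is full, so the itnim parks on
 * reqq_wait and BFA_FALSE is returned; once space frees up,
 * bfa_itnim_qresume() fires BFA_ITNIM_SM_QRESUME and the *_qfull
 * state retries the send.
 */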

static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
        struct bfi_itn_delete_req_s *m;

        /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
        if (!m) {
                bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
                return BFA_FALSE;
        }

        bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
                        bfa_fn_lpu(itnim->bfa));
        m->fw_handle = itnim->rport->fw_handle;
        bfa_stats(itnim, fw_delete);

        /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
        return BFA_TRUE;
}

/*
 * Clean up all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
        struct bfa_ioim_s *ioim;
        struct list_head *qe, *qen;

        list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
                ioim = (struct bfa_ioim_s *)qe;
                bfa_ioim_delayed_comp(ioim, iotov);
        }
}

/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        bfa_itnim_iotov_stop(itnim);

        /*
         * Abort all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_FALSE);

        /*
         * Start all pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &itnim->io_q);
                bfa_ioim_start(ioim);
        }
}

/*
 * Fail all pending IO requests.
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
        struct bfa_ioim_s *ioim;

        /*
         * Fail all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_TRUE);

        /*
         * Fail any pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
                bfa_q_deq(&itnim->pending_q, &ioim);
                list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
                bfa_ioim_tov(ioim);
        }
}

/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
        struct bfa_itnim_s *itnim = itnim_arg;

        itnim->iotov_active = BFA_FALSE;

        bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        bfa_cb_itnim_tov(itnim->ditn);
}

/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
        if (itnim->fcpim->path_tov > 0) {

                itnim->iotov_active = BFA_TRUE;
                WARN_ON(!bfa_itnim_hold_io(itnim));
                bfa_timer_start(itnim->bfa, &itnim->timer,
                        bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
        }
}

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
        if (itnim->iotov_active) {
                itnim->iotov_active = BFA_FALSE;
                bfa_timer_stop(&itnim->timer);
        }
}

/*
 * Stop IO TOV timer and fail back any pending IO requests; used when
 * the itnim is being deleted.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
        bfa_boolean_t pathtov_active = BFA_FALSE;

        if (itnim->iotov_active)
                pathtov_active = BFA_TRUE;

        bfa_itnim_iotov_stop(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov_begin(itnim->ditn);
        bfa_itnim_iotov_cleanup(itnim);
        if (pathtov_active)
                bfa_cb_itnim_tov(itnim->ditn);
}

static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
        fcpim->del_itn_stats.del_itn_iocomp_aborted +=
                itnim->stats.iocomp_aborted;
        fcpim->del_itn_stats.del_itn_iocomp_timedout +=
                itnim->stats.iocomp_timedout;
        fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
                itnim->stats.iocom_sqer_needed;
        fcpim->del_itn_stats.del_itn_iocom_res_free +=
                itnim->stats.iocom_res_free;
        fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
                itnim->stats.iocom_hostabrts;
        fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
        fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
        fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}

/*
 * bfa_itnim_public
 */

/*
 * Itnim interrupt processing.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        union bfi_itn_i2h_msg_u msg;
        struct bfa_itnim_s *itnim;

        bfa_trc(bfa, m->mhdr.msg_id);

        msg.msg = m;

        switch (m->mhdr.msg_id) {
        case BFI_ITN_I2H_CREATE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.create_rsp->bfa_handle);
                WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, create_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_DELETE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.delete_rsp->bfa_handle);
                WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, delete_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;

        case BFI_ITN_I2H_SLER_EVENT:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.sler_event->bfa_handle);
                bfa_stats(itnim, sler_events);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
                break;

        default:
                bfa_trc(bfa, m->mhdr.msg_id);
                WARN_ON(1);
        }
}

/*
 * bfa_itnim_api
 */

struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
        struct bfa_itnim_s *itnim;

        bfa_itn_create(bfa, rport, bfa_itnim_isr);

        itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
        WARN_ON(itnim->rport != rport);

        itnim->ditn = ditn;

        bfa_stats(itnim, creates);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

        return itnim;
}

void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, deletes);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}

void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
        itnim->seq_rec = seq_rec;
        bfa_stats(itnim, onlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}

void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
        bfa_stats(itnim, offlines);
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}

/*
 * Return true if the itnim is considered offline for holding off IO requests.
 * IO is not held if the itnim is being deleted.
 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
        return itnim->fcpim->path_tov && itnim->iotov_active &&
                (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
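/*
 * Put differently: IOs are held only while a path TOV is configured,
 * the TOV timer is active, and the itnim sits in a transitional or
 * offline state. The deleting states are notably absent from the list
 * above, so IOs fail back immediately during delete.
 */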

#define bfa_io_lat_clock_res_div        HZ
#define bfa_io_lat_clock_res_mul        1000
bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
                        struct bfa_itnim_ioprofile_s *ioprofile)
{
        struct bfa_fcpim_s *fcpim;

        if (!itnim)
                return BFA_STATUS_NO_FCPIM_NEXUS;

        fcpim = BFA_FCPIM(itnim->bfa);

        if (!fcpim->io_profile)
                return BFA_STATUS_IOPROFILE_OFF;

        itnim->ioprofile.index = BFA_IOBUCKET_MAX;
        itnim->ioprofile.io_profile_start_time =
                                bfa_io_profile_start_time(itnim->bfa);
        itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
        itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
        *ioprofile = itnim->ioprofile;

        return BFA_STATUS_OK;
}
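/*
 * Latency samples are recorded in jiffies, so with clock_res_mul/div
 * set to 1000/HZ a consumer can convert a tick count to milliseconds:
 *
 *      ms = ticks * ioprofile->clock_res_mul / ioprofile->clock_res_div;
 */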

void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
        int j;

        if (!itnim)
                return;

        memset(&itnim->stats, 0, sizeof(itnim->stats));
        memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
        for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                itnim->ioprofile.io_latency.min[j] = ~0;
}

/*
 *  BFA IO module state machine functions
 */

/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        switch (event) {
        case BFA_IOIM_SM_START:
                if (!bfa_itnim_is_online(ioim->itnim)) {
                        if (!bfa_itnim_hold_io(ioim->itnim)) {
                                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                                list_del(&ioim->qe);
                                list_add_tail(&ioim->qe,
                                        &ioim->fcpim->ioim_comp_q);
                                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                                __bfa_cb_ioim_pathtov, ioim);
                        } else {
                                list_del(&ioim->qe);
                                list_add_tail(&ioim->qe,
                                        &ioim->itnim->pending_q);
                        }
                        break;
                }

                if (ioim->nsges > BFI_SGE_INLINE) {
                        if (!bfa_ioim_sgpg_alloc(ioim)) {
                                bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
                                return;
                        }
                }

                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }

                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_IOTOV:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_pathtov, ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                /*
                 * IO in pending queue can get abort requests. Complete abort
                 * requests immediately.
                 */
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                        __bfa_cb_ioim_abort, ioim);
                break;

        default:
                bfa_sm_fault(ioim->bfa, event);
        }
}

/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
        bfa_trc(ioim->bfa, ioim->iotag);
        bfa_trc(ioim->bfa, event);

        switch (event) {
        case BFA_IOIM_SM_SGALLOCED:
                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
                }
                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
                break;

        case BFA_IOIM_SM_CLEANUP:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
                              ioim);
                bfa_ioim_notify_cleanup(ioim);
                break;

        case BFA_IOIM_SM_ABORT:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
                              ioim);
                break;

        case BFA_IOIM_SM_HWFAIL:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
                bfa_ioim_move_to_comp_q(ioim);
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1610                              ioim);
1611                break;
1612
1613        default:
1614                bfa_sm_fault(ioim->bfa, event);
1615        }
1616}
1617
1618/*
1619 * IO is active.
1620 */
1621static void
1622bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1623{
1624        switch (event) {
1625        case BFA_IOIM_SM_COMP_GOOD:
1626                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1627                bfa_ioim_move_to_comp_q(ioim);
1628                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1629                              __bfa_cb_ioim_good_comp, ioim);
1630                break;
1631
1632        case BFA_IOIM_SM_COMP:
1633                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1634                bfa_ioim_move_to_comp_q(ioim);
1635                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1636                              ioim);
1637                break;
1638
1639        case BFA_IOIM_SM_DONE:
1640                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1641                bfa_ioim_move_to_comp_q(ioim);
1642                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1643                              ioim);
1644                break;
1645
1646        case BFA_IOIM_SM_ABORT:
1647                ioim->iosp->abort_explicit = BFA_TRUE;
1648                ioim->io_cbfn = __bfa_cb_ioim_abort;
1649
1650                if (bfa_ioim_send_abort(ioim))
1651                        bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1652                else {
1653                        bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
1654                        bfa_stats(ioim->itnim, qwait);
1655                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
1656                                          &ioim->iosp->reqq_wait);
1657                }
1658                break;
1659
1660        case BFA_IOIM_SM_CLEANUP:
1661                ioim->iosp->abort_explicit = BFA_FALSE;
1662                ioim->io_cbfn = __bfa_cb_ioim_failed;
1663
1664                if (bfa_ioim_send_abort(ioim))
1665                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1666                else {
1667                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1668                        bfa_stats(ioim->itnim, qwait);
1669                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
1670                                          &ioim->iosp->reqq_wait);
1671                }
1672                break;
1673
1674        case BFA_IOIM_SM_HWFAIL:
1675                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1676                bfa_ioim_move_to_comp_q(ioim);
1677                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1678                              ioim);
1679                break;
1680
1681        case BFA_IOIM_SM_SQRETRY:
1682                if (bfa_ioim_maxretry_reached(ioim)) {
1683                        /* max retry reached, free IO */
1684                        bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1685                        bfa_ioim_move_to_comp_q(ioim);
1686                        bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1687                                        __bfa_cb_ioim_failed, ioim);
1688                        break;
1689                }
1690                /* waiting for IO tag resource free */
1691                bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
1692                break;
1693
1694        default:
1695                bfa_sm_fault(ioim->bfa, event);
1696        }
1697}
1698
1699/*
1700 * IO is retried with new tag.
1701 */
1702static void
1703bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1704{
1705        switch (event) {
1706        case BFA_IOIM_SM_FREE:
1707                /* abts and rrq done. Now retry the IO with new tag */
1708                bfa_ioim_update_iotag(ioim);
1709                if (!bfa_ioim_send_ioreq(ioim)) {
1710                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1711                        break;
1712                }
1713                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1714                break;
1715
1716        case BFA_IOIM_SM_CLEANUP:
1717                ioim->iosp->abort_explicit = BFA_FALSE;
1718                ioim->io_cbfn = __bfa_cb_ioim_failed;
1719
1720                if (bfa_ioim_send_abort(ioim))
1721                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1722                else {
1723                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1724                        bfa_stats(ioim->itnim, qwait);
1725                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
1726                                          &ioim->iosp->reqq_wait);
1727                }
1728                break;
1729
1730        case BFA_IOIM_SM_HWFAIL:
1731                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1732                bfa_ioim_move_to_comp_q(ioim);
1733                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1734                         __bfa_cb_ioim_failed, ioim);
1735                break;
1736
1737        case BFA_IOIM_SM_ABORT:
1738                /* In this state the IO abort is already done.
1739                 * Waiting for the IO tag resource to be freed.
1740                 */
1741                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1742                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1743                              ioim);
1744                break;
1745
1746        default:
1747                bfa_sm_fault(ioim->bfa, event);
1748        }
1749}
1750
1751/*
1752 * IO is being aborted, waiting for completion from firmware.
1753 */
1754static void
1755bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1756{
1757        bfa_trc(ioim->bfa, ioim->iotag);
1758        bfa_trc(ioim->bfa, event);
1759
1760        switch (event) {
1761        case BFA_IOIM_SM_COMP_GOOD:
1762        case BFA_IOIM_SM_COMP:
1763        case BFA_IOIM_SM_DONE:
1764        case BFA_IOIM_SM_FREE:
1765                break;
1766
1767        case BFA_IOIM_SM_ABORT_DONE:
1768                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1769                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1770                              ioim);
1771                break;
1772
1773        case BFA_IOIM_SM_ABORT_COMP:
1774                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1775                bfa_ioim_move_to_comp_q(ioim);
1776                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1777                              ioim);
1778                break;
1779
1780        case BFA_IOIM_SM_COMP_UTAG:
1781                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1782                bfa_ioim_move_to_comp_q(ioim);
1783                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1784                              ioim);
1785                break;
1786
1787        case BFA_IOIM_SM_CLEANUP:
1788                WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1789                ioim->iosp->abort_explicit = BFA_FALSE;
1790
1791                if (bfa_ioim_send_abort(ioim))
1792                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1793                else {
1794                        bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1795                        bfa_stats(ioim->itnim, qwait);
1796                        bfa_reqq_wait(ioim->bfa, ioim->reqq,
1797                                          &ioim->iosp->reqq_wait);
1798                }
1799                break;
1800
1801        case BFA_IOIM_SM_HWFAIL:
1802                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1803                bfa_ioim_move_to_comp_q(ioim);
1804                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1805                              ioim);
1806                break;
1807
1808        default:
1809                bfa_sm_fault(ioim->bfa, event);
1810        }
1811}
1812
1813/*
1814 * IO is being cleaned up (implicit abort), waiting for completion from
1815 * firmware.
1816 */
1817static void
1818bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1819{
1820        bfa_trc(ioim->bfa, ioim->iotag);
1821        bfa_trc(ioim->bfa, event);
1822
1823        switch (event) {
1824        case BFA_IOIM_SM_COMP_GOOD:
1825        case BFA_IOIM_SM_COMP:
1826        case BFA_IOIM_SM_DONE:
1827        case BFA_IOIM_SM_FREE:
1828                break;
1829
1830        case BFA_IOIM_SM_ABORT:
1831                /*
1832                 * IO is already being aborted implicitly
1833                 */
1834                ioim->io_cbfn = __bfa_cb_ioim_abort;
1835                break;
1836
1837        case BFA_IOIM_SM_ABORT_DONE:
1838                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1839                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1840                bfa_ioim_notify_cleanup(ioim);
1841                break;
1842
1843        case BFA_IOIM_SM_ABORT_COMP:
1844                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1845                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1846                bfa_ioim_notify_cleanup(ioim);
1847                break;
1848
1849        case BFA_IOIM_SM_COMP_UTAG:
1850                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1851                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1852                bfa_ioim_notify_cleanup(ioim);
1853                break;
1854
1855        case BFA_IOIM_SM_HWFAIL:
1856                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1857                bfa_ioim_move_to_comp_q(ioim);
1858                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1859                              ioim);
1860                break;
1861
1862        case BFA_IOIM_SM_CLEANUP:
1863                /*
1864                 * IO can be in cleanup state already due to TM command.
1865                 * 2nd cleanup request comes from ITN offline event.
1866                 */
1867                break;
1868
1869        default:
1870                bfa_sm_fault(ioim->bfa, event);
1871        }
1872}
1873
1874/*
1875 * IO is waiting for room in request CQ
1876 */
1877static void
1878bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1879{
1880        bfa_trc(ioim->bfa, ioim->iotag);
1881        bfa_trc(ioim->bfa, event);
1882
1883        switch (event) {
1884        case BFA_IOIM_SM_QRESUME:
1885                bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1886                bfa_ioim_send_ioreq(ioim);
1887                break;
1888
1889        case BFA_IOIM_SM_ABORT:
1890                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1891                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1892                bfa_ioim_move_to_comp_q(ioim);
1893                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1894                              ioim);
1895                break;
1896
1897        case BFA_IOIM_SM_CLEANUP:
1898                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1899                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1900                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1901                              ioim);
1902                bfa_ioim_notify_cleanup(ioim);
1903                break;
1904
1905        case BFA_IOIM_SM_HWFAIL:
1906                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1907                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1908                bfa_ioim_move_to_comp_q(ioim);
1909                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1910                              ioim);
1911                break;
1912
1913        default:
1914                bfa_sm_fault(ioim->bfa, event);
1915        }
1916}
1917
1918/*
1919 * Active IO is being aborted, waiting for room in request CQ.
1920 */
1921static void
1922bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1923{
1924        bfa_trc(ioim->bfa, ioim->iotag);
1925        bfa_trc(ioim->bfa, event);
1926
1927        switch (event) {
1928        case BFA_IOIM_SM_QRESUME:
1929                bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1930                bfa_ioim_send_abort(ioim);
1931                break;
1932
1933        case BFA_IOIM_SM_CLEANUP:
1934                WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
1935                ioim->iosp->abort_explicit = BFA_FALSE;
1936                bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1937                break;
1938
1939        case BFA_IOIM_SM_COMP_GOOD:
1940        case BFA_IOIM_SM_COMP:
1941                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1942                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1943                bfa_ioim_move_to_comp_q(ioim);
1944                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1945                              ioim);
1946                break;
1947
1948        case BFA_IOIM_SM_DONE:
1949                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1950                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1951                bfa_ioim_move_to_comp_q(ioim);
1952                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1953                              ioim);
1954                break;
1955
1956        case BFA_IOIM_SM_HWFAIL:
1957                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1958                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1959                bfa_ioim_move_to_comp_q(ioim);
1960                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1961                              ioim);
1962                break;
1963
1964        default:
1965                bfa_sm_fault(ioim->bfa, event);
1966        }
1967}
1968
1969/*
1970 * Active IO is being cleaned up, waiting for room in request CQ.
1971 */
1972static void
1973bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1974{
1975        bfa_trc(ioim->bfa, ioim->iotag);
1976        bfa_trc(ioim->bfa, event);
1977
1978        switch (event) {
1979        case BFA_IOIM_SM_QRESUME:
1980                bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1981                bfa_ioim_send_abort(ioim);
1982                break;
1983
1984        case BFA_IOIM_SM_ABORT:
1985                /*
1986                 * IO is already being cleaned up implicitly
1987                 */
1988                ioim->io_cbfn = __bfa_cb_ioim_abort;
1989                break;
1990
1991        case BFA_IOIM_SM_COMP_GOOD:
1992        case BFA_IOIM_SM_COMP:
1993                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1994                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1995                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1996                bfa_ioim_notify_cleanup(ioim);
1997                break;
1998
1999        case BFA_IOIM_SM_DONE:
2000                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
2001                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2002                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2003                bfa_ioim_notify_cleanup(ioim);
2004                break;
2005
2006        case BFA_IOIM_SM_HWFAIL:
2007                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2008                bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
2009                bfa_ioim_move_to_comp_q(ioim);
2010                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
2011                              ioim);
2012                break;
2013
2014        default:
2015                bfa_sm_fault(ioim->bfa, event);
2016        }
2017}
2018
2019/*
2020 * IO bfa callback is pending.
2021 */
2022static void
2023bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2024{
2025        switch (event) {
2026        case BFA_IOIM_SM_HCB:
2027                bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2028                bfa_ioim_free(ioim);
2029                break;
2030
2031        case BFA_IOIM_SM_CLEANUP:
2032                bfa_ioim_notify_cleanup(ioim);
2033                break;
2034
2035        case BFA_IOIM_SM_HWFAIL:
2036                break;
2037
2038        default:
2039                bfa_sm_fault(ioim->bfa, event);
2040        }
2041}
2042
2043/*
2044 * IO bfa callback is pending. IO resource cannot be freed.
2045 */
2046static void
2047bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2048{
2049        bfa_trc(ioim->bfa, ioim->iotag);
2050        bfa_trc(ioim->bfa, event);
2051
2052        switch (event) {
2053        case BFA_IOIM_SM_HCB:
2054                bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
2055                list_del(&ioim->qe);
2056                list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
2057                break;
2058
2059        case BFA_IOIM_SM_FREE:
2060                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2061                break;
2062
2063        case BFA_IOIM_SM_CLEANUP:
2064                bfa_ioim_notify_cleanup(ioim);
2065                break;
2066
2067        case BFA_IOIM_SM_HWFAIL:
2068                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2069                break;
2070
2071        default:
2072                bfa_sm_fault(ioim->bfa, event);
2073        }
2074}
2075
2076/*
2077 * IO is completed, waiting for resource free from firmware.
2078 */
2079static void
2080bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2081{
2082        bfa_trc(ioim->bfa, ioim->iotag);
2083        bfa_trc(ioim->bfa, event);
2084
2085        switch (event) {
2086        case BFA_IOIM_SM_FREE:
2087                bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2088                bfa_ioim_free(ioim);
2089                break;
2090
2091        case BFA_IOIM_SM_CLEANUP:
2092                bfa_ioim_notify_cleanup(ioim);
2093                break;
2094
2095        case BFA_IOIM_SM_HWFAIL:
2096                break;
2097
2098        default:
2099                bfa_sm_fault(ioim->bfa, event);
2100        }
2101}
2102
2103/*
2104 * This is called from bfa_fcpim_start after bfa_init() with the flash
2105 * read has completed in the driver. Now invalidate the stale lun mask
2106 * content such as unit attention, rp tag and lp tag.
2107 */
2108static void
2109bfa_ioim_lm_init(struct bfa_s *bfa)
2110{
2111        struct bfa_lun_mask_s *lunm_list;
2112        int     i;
2113
2114        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2115                return;
2116
2117        lunm_list = bfa_get_lun_mask_list(bfa);
2118        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2119                lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
2120                lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2121                lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2122        }
2123}
2124
2125static void
2126__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2127{
2128        struct bfa_ioim_s *ioim = cbarg;
2129
2130        if (!complete) {
2131                bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2132                return;
2133        }
2134
2135        bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2136}
2137
2138static void
2139__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2140{
2141        struct bfa_ioim_s       *ioim = cbarg;
2142        struct bfi_ioim_rsp_s *m;
2143        u8      *snsinfo = NULL;
2144        u8      sns_len = 0;
2145        s32     residue = 0;
2146
2147        if (!complete) {
2148                bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2149                return;
2150        }
2151
2152        m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2153        if (m->io_status == BFI_IOIM_STS_OK) {
2154                /*
2155                 * setup sense information, if present
2156                 */
2157                if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
2158                                        m->sns_len) {
2159                        sns_len = m->sns_len;
2160                        snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2161                                                ioim->iotag);
2162                }
2163
2164                /*
2165                 * setup residue value correctly for normal completions
2166                 */
2167                if (m->resid_flags == FCP_RESID_UNDER) {
2168                        residue = be32_to_cpu(m->residue);
2169                        bfa_stats(ioim->itnim, iocomp_underrun);
2170                }
2171                if (m->resid_flags == FCP_RESID_OVER) {
2172                        residue = be32_to_cpu(m->residue);
2173                        residue = -residue;
2174                        bfa_stats(ioim->itnim, iocomp_overrun);
2175                }
2176        }
2177
2178        bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
2179                          m->scsi_status, sns_len, snsinfo, residue);
2180}
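
    /*
     * Worked example for the residue handling above (illustrative): a
     * 4096-byte read for which the target moved only 3072 bytes completes
     * with resid_flags == FCP_RESID_UNDER and residue == 1024; an overrun
     * is reported as a negative residue so the mid-layer can distinguish
     * the two cases.
     */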
2181
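    /*
     * Refresh the lp/rp tags cached in every active LUN mask entry that
     * matches the given lport/rport WWN pair.
     */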
2182void
2183bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
2184                        u16 rp_tag, u8 lp_tag)
2185{
2186        struct bfa_lun_mask_s *lun_list;
2187        u8      i;
2188
2189        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2190                return;
2191
2192        lun_list = bfa_get_lun_mask_list(bfa);
2193        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2194                if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2195                        if ((lun_list[i].lp_wwn == lp_wwn) &&
2196                            (lun_list[i].rp_wwn == rp_wwn)) {
2197                                lun_list[i].rp_tag = rp_tag;
2198                                lun_list[i].lp_tag = lp_tag;
2199                        }
2200                }
2201        }
2202}
2203
2204/*
2205 * set UA for all active luns in LM DB
2206 */
2207static void
2208bfa_ioim_lm_set_ua(struct bfa_s *bfa)
2209{
2210        struct bfa_lun_mask_s   *lunm_list;
2211        int     i;
2212
2213        lunm_list = bfa_get_lun_mask_list(bfa);
2214        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2215                if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2216                        continue;
2217                lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2218        }
2219}
2220
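    /*
     * Enable or disable LUN masking. Enabling re-arms a unit attention on
     * every active entry; the new state is persisted via bfa_dconf_update().
     */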
2221bfa_status_t
2222bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
2223{
2224        struct bfa_lunmask_cfg_s        *lun_mask;
2225
2226        bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2227        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2228                return BFA_STATUS_FAILED;
2229
2230        if (bfa_get_lun_mask_status(bfa) == update)
2231                return BFA_STATUS_NO_CHANGE;
2232
2233        lun_mask = bfa_get_lun_mask(bfa);
2234        lun_mask->status = update;
2235
2236        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
2237                bfa_ioim_lm_set_ua(bfa);
2238
2239        return  bfa_dconf_update(bfa);
2240}
2241
2242bfa_status_t
2243bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
2244{
2245        int i;
2246        struct bfa_lun_mask_s   *lunm_list;
2247
2248        bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2249        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2250                return BFA_STATUS_FAILED;
2251
2252        lunm_list = bfa_get_lun_mask_list(bfa);
2253        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2254                if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2255                        if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
2256                                bfa_rport_unset_lunmask(bfa,
2257                                  BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
2258                }
2259        }
2260
2261        memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
2262        return bfa_dconf_update(bfa);
2263}
2264
2265bfa_status_t
2266bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
2267{
2268        struct bfa_lunmask_cfg_s *lun_mask;
2269
2270        bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2271        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2272                return BFA_STATUS_FAILED;
2273
2274        lun_mask = bfa_get_lun_mask(bfa);
2275        memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
2276        return BFA_STATUS_OK;
2277}
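
    /*
     * Caller sketch for the query above (illustrative; field names assumed
     * from the BFA service definitions):
     *
     *      struct bfa_lunmask_cfg_s cfg;
     *
     *      if (bfa_fcpim_lunmask_query(bfa, &cfg) == BFA_STATUS_OK)
     *              scan cfg.lun_list[] for BFA_IOIM_LUN_MASK_ACTIVE entries;
     */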
2278
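    /*
     * Add a LUN mask entry for the (lport, rport, lun) triple: reject
     * duplicates, claim a free slot, resolve the lp/rp tags if the remote
     * port is currently known, and re-arm unit attention for the nexus.
     */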
2279bfa_status_t
2280bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2281                      wwn_t rpwwn, struct scsi_lun lun)
2282{
2283        struct bfa_lun_mask_s *lunm_list;
2284        struct bfa_rport_s *rp = NULL;
2285        int i, free_index = MAX_LUN_MASK_CFG + 1;
2286        struct bfa_fcs_lport_s *port = NULL;
2287        struct bfa_fcs_rport_s *rp_fcs;
2288
2289        bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2290        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2291                return BFA_STATUS_FAILED;
2292
2293        port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
2294                                   vf_id, *pwwn);
2295        if (port) {
2296                *pwwn = port->port_cfg.pwwn;
2297                rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2298                if (rp_fcs)
2299                        rp = rp_fcs->bfa_rport;
2300        }
2301
2302        lunm_list = bfa_get_lun_mask_list(bfa);
2303        /* if entry exists */
2304        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2305                if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2306                        free_index = i;
2307                if ((lunm_list[i].lp_wwn == *pwwn) &&
2308                    (lunm_list[i].rp_wwn == rpwwn) &&
2309                    (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2310                     scsilun_to_int((struct scsi_lun *)&lun)))
2311                        return  BFA_STATUS_ENTRY_EXISTS;
2312        }
2313
2314        if (free_index > MAX_LUN_MASK_CFG)
2315                return BFA_STATUS_MAX_ENTRY_REACHED;
2316
2317        if (rp) {
2318                lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
2319                                                   rp->rport_info.local_pid);
2320                lunm_list[free_index].rp_tag = rp->rport_tag;
2321        } else {
2322                lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
2323                lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
2324        }
2325
2326        lunm_list[free_index].lp_wwn = *pwwn;
2327        lunm_list[free_index].rp_wwn = rpwwn;
2328        lunm_list[free_index].lun = lun;
2329        lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
2330
2331        /* set for all luns in this rp */
2332        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2333                if ((lunm_list[i].lp_wwn == *pwwn) &&
2334                    (lunm_list[i].rp_wwn == rpwwn))
2335                        lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2336        }
2337
2338        return bfa_dconf_update(bfa);
2339}
2340
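    /*
     * Delete the LUN mask entry matching the (lport, rport, lun) triple.
     * On a match the slot is cleared and persisted; otherwise unit
     * attention is re-armed for the nexus and BFA_STATUS_ENTRY_NOT_EXISTS
     * is returned.
     */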
2341bfa_status_t
2342bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2343                         wwn_t rpwwn, struct scsi_lun lun)
2344{
2345        struct bfa_lun_mask_s   *lunm_list;
2346        struct bfa_rport_s      *rp = NULL;
2347        struct bfa_fcs_lport_s *port = NULL;
2348        struct bfa_fcs_rport_s *rp_fcs;
2349        int     i;
2350
2351        /* In min cfg, lunm_list could be NULL but no commands should run. */
2352        if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2353                return BFA_STATUS_FAILED;
2354
2355        bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2356        bfa_trc(bfa, *pwwn);
2357        bfa_trc(bfa, rpwwn);
2358        bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
2359
2360        if (*pwwn == 0) {
2361                port = bfa_fcs_lookup_port(
2362                                &((struct bfad_s *)bfa->bfad)->bfa_fcs,
2363                                vf_id, *pwwn);
2364                if (port) {
2365                        *pwwn = port->port_cfg.pwwn;
2366                        rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2367                        if (rp_fcs)
2368                                rp = rp_fcs->bfa_rport;
2369                }
2370        }
2371
2372        lunm_list = bfa_get_lun_mask_list(bfa);
2373        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2374                if ((lunm_list[i].lp_wwn == *pwwn) &&
2375                    (lunm_list[i].rp_wwn == rpwwn) &&
2376                    (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2377                     scsilun_to_int((struct scsi_lun *)&lun))) {
2378                        lunm_list[i].lp_wwn = 0;
2379                        lunm_list[i].rp_wwn = 0;
2380                        int_to_scsilun(0, &lunm_list[i].lun);
2381                        lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
2382                        if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
2383                                lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2384                                lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2385                        }
2386                        return bfa_dconf_update(bfa);
2387                }
2388        }
2389
2390        /* set for all luns in this rp */
2391        for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2392                if ((lunm_list[i].lp_wwn == *pwwn) &&
2393                    (lunm_list[i].rp_wwn == rpwwn))
2394                        lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2395        }
2396
2397        return BFA_STATUS_ENTRY_NOT_EXISTS;
2398}
2399
2400static void
2401__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2402{
2403        struct bfa_ioim_s *ioim = cbarg;
2404
2405        if (!complete) {
2406                bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2407                return;
2408        }
2409
2410        bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2411                          0, 0, NULL, 0);
2412}
2413
2414static void
2415__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2416{
2417        struct bfa_ioim_s *ioim = cbarg;
2418
2419        bfa_stats(ioim->itnim, path_tov_expired);
2420        if (!complete) {
2421                bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2422                return;
2423        }
2424
2425        bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2426                          0, 0, NULL, 0);
2427}
2428
2429static void
2430__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2431{
2432        struct bfa_ioim_s *ioim = cbarg;
2433
2434        if (!complete) {
2435                bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2436                return;
2437        }
2438
2439        bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2440}
2441
2442static void
2443bfa_ioim_sgpg_alloced(void *cbarg)
2444{
2445        struct bfa_ioim_s *ioim = cbarg;
2446
2447        ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2448        list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
2449        ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2450        bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2451}
2452
2453/*
2454 * Send I/O request to firmware.
2455 */
2456static  bfa_boolean_t
2457bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
2458{
2459        struct bfa_itnim_s *itnim = ioim->itnim;
2460        struct bfi_ioim_req_s *m;
2461        static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
2462        struct bfi_sge_s *sge, *sgpge;
2463        u32     pgdlen = 0;
2464        u32     fcp_dl;
2465        u64 addr;
2466        struct scatterlist *sg;
2467        struct bfa_sgpg_s *sgpg;
2468        struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2469        u32 i, sge_id, pgcumsz;
2470        enum dma_data_direction dmadir;
2471
2472        /*
2473         * check for room in queue to send request now
2474         */
2475        m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2476        if (!m) {
2477                bfa_stats(ioim->itnim, qwait);
2478                bfa_reqq_wait(ioim->bfa, ioim->reqq,
2479                                  &ioim->iosp->reqq_wait);
2480                return BFA_FALSE;
2481        }
2482
2483        /*
2484         * build i/o request message next
2485         */
2486        m->io_tag = cpu_to_be16(ioim->iotag);
2487        m->rport_hdl = ioim->itnim->rport->fw_handle;
2488        m->io_timeout = 0;
2489
2490        sge = &m->sges[0];
2491        sgpg = ioim->sgpg;
2492        sge_id = 0;
2493        sgpge = NULL;
2494        pgcumsz = 0;
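            /*
             * SG layout: the first element rides inline in the request
             * message; the rest are written into SG pages holding
             * BFI_SGPG_DATA_SGES entries each, chained with BFI_SGE_LINK
             * entries and terminated by a BFI_SGE_PGDLEN trailer.
             */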
2495        scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
2496                if (i == 0) {
2497                        /* build inline IO SG element */
2498                        addr = bfa_sgaddr_le(sg_dma_address(sg));
2499                        sge->sga = *(union bfi_addr_u *) &addr;
2500                        pgdlen = sg_dma_len(sg);
2501                        sge->sg_len = pgdlen;
2502                        sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
2503                                        BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
2504                        bfa_sge_to_be(sge);
2505                        sge++;
2506                } else {
2507                        if (sge_id == 0)
2508                                sgpge = sgpg->sgpg->sges;
2509
2510                        addr = bfa_sgaddr_le(sg_dma_address(sg));
2511                        sgpge->sga = *(union bfi_addr_u *) &addr;
2512                        sgpge->sg_len = sg_dma_len(sg);
2513                        pgcumsz += sgpge->sg_len;
2514
2515                        /* set flags */
2516                        if (i < (ioim->nsges - 1) &&
2517                                        sge_id < (BFI_SGPG_DATA_SGES - 1))
2518                                sgpge->flags = BFI_SGE_DATA;
2519                        else if (i < (ioim->nsges - 1))
2520                                sgpge->flags = BFI_SGE_DATA_CPL;
2521                        else
2522                                sgpge->flags = BFI_SGE_DATA_LAST;
2523
2524                        bfa_sge_to_le(sgpge);
2525
2526                        sgpge++;
2527                        if (i == (ioim->nsges - 1)) {
2528                                sgpge->flags = BFI_SGE_PGDLEN;
2529                                sgpge->sga.a32.addr_lo = 0;
2530                                sgpge->sga.a32.addr_hi = 0;
2531                                sgpge->sg_len = pgcumsz;
2532                                bfa_sge_to_le(sgpge);
2533                        } else if (++sge_id == BFI_SGPG_DATA_SGES) {
2534                                sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
2535                                sgpge->flags = BFI_SGE_LINK;
2536                                sgpge->sga = sgpg->sgpg_pa;
2537                                sgpge->sg_len = pgcumsz;
2538                                bfa_sge_to_le(sgpge);
2539                                sge_id = 0;
2540                                pgcumsz = 0;
2541                        }
2542                }
2543        }
2544
2545        if (ioim->nsges > BFI_SGE_INLINE) {
2546                sge->sga = ioim->sgpg->sgpg_pa;
2547        } else {
2548                sge->sga.a32.addr_lo = 0;
2549                sge->sga.a32.addr_hi = 0;
2550        }
2551        sge->sg_len = pgdlen;
2552        sge->flags = BFI_SGE_PGDLEN;
2553        bfa_sge_to_be(sge);
2554
2555        /*
2556         * set up I/O command parameters
2557         */
2558        m->cmnd = cmnd_z0;
2559        int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
2560        dmadir = cmnd->sc_data_direction;
2561        if (dmadir == DMA_TO_DEVICE)
2562                m->cmnd.iodir = FCP_IODIR_WRITE;
2563        else if (dmadir == DMA_FROM_DEVICE)
2564                m->cmnd.iodir = FCP_IODIR_READ;
2565        else
2566                m->cmnd.iodir = FCP_IODIR_NONE;
2567
2568        m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
2569        fcp_dl = scsi_bufflen(cmnd);
2570        m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
2571
2572        /*
2573         * set up I/O message header
2574         */
2575        switch (m->cmnd.iodir) {
2576        case FCP_IODIR_READ:
2577                bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
2578                bfa_stats(itnim, input_reqs);
2579                ioim->itnim->stats.rd_throughput += fcp_dl;
2580                break;
2581        case FCP_IODIR_WRITE:
2582                bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
2583                bfa_stats(itnim, output_reqs);
2584                ioim->itnim->stats.wr_throughput += fcp_dl;
2585                break;
2586        case FCP_IODIR_RW:
2587                bfa_stats(itnim, input_reqs);
2588                bfa_stats(itnim, output_reqs);
                    fallthrough;
2589        default:
2590                bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2591        }
2592        if (itnim->seq_rec ||
2593            (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
2594                bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
2595
2596        /*
2597         * queue I/O message to firmware
2598         */
2599        bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2600        return BFA_TRUE;
2601}
2602
2603/*
2604 * Set up any additional SG pages that are needed. The inline SG
2605 * element is set up at queuing time.
2606 */
2607static bfa_boolean_t
2608bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
2609{
2610        u16     nsgpgs;
2611
2612        WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
2613
2614        /*
2615         * allocate SG pages needed
2616         */
2617        nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2618        if (!nsgpgs)
2619                return BFA_TRUE;
2620
2621        if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
2622            != BFA_STATUS_OK) {
2623                bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2624                return BFA_FALSE;
2625        }
2626
2627        ioim->nsgpgs = nsgpgs;
2628        ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
2629
2630        return BFA_TRUE;
2631}
2632
2633/*
2634 * Send I/O abort request to firmware.
2635 */
2636static  bfa_boolean_t
2637bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2638{
2639        struct bfi_ioim_abort_req_s *m;
2640        enum bfi_ioim_h2i       msgop;
2641
2642        /*
2643         * check for room in queue to send request now
2644         */
2645        m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2646        if (!m)
2647                return BFA_FALSE;
2648
2649        /*
2650         * build i/o request message next
2651         */
2652        if (ioim->iosp->abort_explicit)
2653                msgop = BFI_IOIM_H2I_IOABORT_REQ;
2654        else
2655                msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2656
2657        bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
2658        m->io_tag    = cpu_to_be16(ioim->iotag);
2659        m->abort_tag = ++ioim->abort_tag;
2660
2661        /*
2662         * queue I/O message to firmware
2663         */
2664        bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
2665        return BFA_TRUE;
2666}
2667
2668/*
2669 * Call to resume any I/O requests waiting for room in request queue.
2670 */
2671static void
2672bfa_ioim_qresume(void *cbarg)
2673{
2674        struct bfa_ioim_s *ioim = cbarg;
2675
2676        bfa_stats(ioim->itnim, qresumes);
2677        bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2678}
2679
2680
2681static void
2682bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2683{
2684        /*
2685         * Move IO from itnim queue to fcpim global queue since itnim will be
2686         * freed.
2687         */
2688        list_del(&ioim->qe);
2689        list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2690
2691        if (!ioim->iosp->tskim) {
2692                if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
2693                        bfa_cb_dequeue(&ioim->hcb_qe);
2694                        list_del(&ioim->qe);
2695                        list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
2696                }
2697                bfa_itnim_iodone(ioim->itnim);
2698        } else
2699                bfa_wc_down(&ioim->iosp->tskim->wc);
2700}
2701
2702static bfa_boolean_t
2703bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2704{
2705        if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2706            (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)))    ||
2707            (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort))         ||
2708            (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull))   ||
2709            (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb))           ||
2710            (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free))      ||
2711            (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2712                return BFA_FALSE;
2713
2714        return BFA_TRUE;
2715}
2716
2717void
2718bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2719{
2720        /*
2721         * If the path tov timer expired, fail back with PATHTOV status -
2722         * these IO requests are not normally retried by the IO stack.
2723         *
2724         * Otherwise the device came back online; fail the IO with normal
2725         * failed status so that the IO stack retries these requests.
2726         */
2727        if (iotov)
2728                ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2729        else {
2730                ioim->io_cbfn = __bfa_cb_ioim_failed;
2731                bfa_stats(ioim->itnim, iocom_nexus_abort);
2732        }
2733        bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2734
2735        /*
2736         * Move IO to fcpim global queue since itnim will be
2737         * freed.
2738         */
2739        list_del(&ioim->qe);
2740        list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2741}
2742
2743
2744/*
2745 * Memory allocation and initialization.
2746 */
2747void
2748bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
2749{
2750        struct bfa_ioim_s               *ioim;
2751        struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
2752        struct bfa_ioim_sp_s    *iosp;
2753        u16             i;
2754
2755        /*
2756         * claim memory first
2757         */
2758        ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
2759        fcpim->ioim_arr = ioim;
2760        bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
2761
2762        iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
2763        fcpim->ioim_sp_arr = iosp;
2764        bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
2765
2766        /*
2767         * Initialize ioim free queues
2768         */
2769        INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
2770        INIT_LIST_HEAD(&fcpim->ioim_comp_q);
2771
2772        for (i = 0; i < fcpim->fcp->num_ioim_reqs;
2773             i++, ioim++, iosp++) {
2774                /*
2775                 * initialize IOIM
2776                 */
2777                memset(ioim, 0, sizeof(struct bfa_ioim_s));
2778                ioim->iotag   = i;
2779                ioim->bfa     = fcpim->bfa;
2780                ioim->fcpim   = fcpim;
2781                ioim->iosp    = iosp;
2782                INIT_LIST_HEAD(&ioim->sgpg_q);
2783                bfa_reqq_winit(&ioim->iosp->reqq_wait,
2784                                   bfa_ioim_qresume, ioim);
2785                bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
2786                                   bfa_ioim_sgpg_alloced, ioim);
2787                bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
2788        }
2789}
2790
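    /*
     * ISR for IO completions that need response decoding: map the firmware
     * completion status to a state-machine event and post it to the ioim.
     */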
2791void
2792bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2793{
2794        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2795        struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2796        struct bfa_ioim_s *ioim;
2797        u16     iotag;
2798        enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2799
2800        iotag = be16_to_cpu(rsp->io_tag);
2801
2802        ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2803        WARN_ON(ioim->iotag != iotag);
2804
2805        bfa_trc(ioim->bfa, ioim->iotag);
2806        bfa_trc(ioim->bfa, rsp->io_status);
2807        bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2808
2809        if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
2810                ioim->iosp->comp_rspmsg = *m;
2811
2812        switch (rsp->io_status) {
2813        case BFI_IOIM_STS_OK:
2814                bfa_stats(ioim->itnim, iocomp_ok);
2815                if (rsp->reuse_io_tag == 0)
2816                        evt = BFA_IOIM_SM_DONE;
2817                else
2818                        evt = BFA_IOIM_SM_COMP;
2819                break;
2820
2821        case BFI_IOIM_STS_TIMEDOUT:
2822                bfa_stats(ioim->itnim, iocomp_timedout);
                    fallthrough;
2823        case BFI_IOIM_STS_ABORTED:
2824                rsp->io_status = BFI_IOIM_STS_ABORTED;
2825                bfa_stats(ioim->itnim, iocomp_aborted);
2826                if (rsp->reuse_io_tag == 0)
2827                        evt = BFA_IOIM_SM_DONE;
2828                else
2829                        evt = BFA_IOIM_SM_COMP;
2830                break;
2831
2832        case BFI_IOIM_STS_PROTO_ERR:
2833                bfa_stats(ioim->itnim, iocom_proto_err);
2834                WARN_ON(!rsp->reuse_io_tag);
2835                evt = BFA_IOIM_SM_COMP;
2836                break;
2837
2838        case BFI_IOIM_STS_SQER_NEEDED:
2839                bfa_stats(ioim->itnim, iocom_sqer_needed);
2840                WARN_ON(rsp->reuse_io_tag != 0);
2841                evt = BFA_IOIM_SM_SQRETRY;
2842                break;
2843
2844        case BFI_IOIM_STS_RES_FREE:
2845                bfa_stats(ioim->itnim, iocom_res_free);
2846                evt = BFA_IOIM_SM_FREE;
2847                break;
2848
2849        case BFI_IOIM_STS_HOST_ABORTED:
2850                bfa_stats(ioim->itnim, iocom_hostabrts);
2851                if (rsp->abort_tag != ioim->abort_tag) {
2852                        bfa_trc(ioim->bfa, rsp->abort_tag);
2853                        bfa_trc(ioim->bfa, ioim->abort_tag);
2854                        return;
2855                }
2856
2857                if (rsp->reuse_io_tag)
2858                        evt = BFA_IOIM_SM_ABORT_COMP;
2859                else
2860                        evt = BFA_IOIM_SM_ABORT_DONE;
2861                break;
2862
2863        case BFI_IOIM_STS_UTAG:
2864                bfa_stats(ioim->itnim, iocom_utags);
2865                evt = BFA_IOIM_SM_COMP_UTAG;
2866                break;
2867
2868        default:
2869                WARN_ON(1);
2870        }
2871
2872        bfa_sm_send_event(ioim, evt);
2873}
2874
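    /*
     * ISR for good IO completions: nothing to decode, just record
     * profiling data and post COMP_GOOD to the ioim.
     */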
2875void
2876bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2877{
2878        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2879        struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2880        struct bfa_ioim_s *ioim;
2881        u16     iotag;
2882
2883        iotag = be16_to_cpu(rsp->io_tag);
2884
2885        ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2886        WARN_ON(ioim->iotag != iotag);
2887
2888        bfa_ioim_cb_profile_comp(fcpim, ioim);
2889
2890        bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2891}
2892
2893/*
2894 * Called by itnim to clean up IO while going offline.
2895 */
2896void
2897bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
2898{
2899        bfa_trc(ioim->bfa, ioim->iotag);
2900        bfa_stats(ioim->itnim, io_cleanups);
2901
2902        ioim->iosp->tskim = NULL;
2903        bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2904}
2905
2906void
2907bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
2908{
2909        bfa_trc(ioim->bfa, ioim->iotag);
2910        bfa_stats(ioim->itnim, io_tmaborts);
2911
2912        ioim->iosp->tskim = tskim;
2913        bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
2914}
2915
2916/*
2917 * IOC failure handling.
2918 */
2919void
2920bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
2921{
2922        bfa_trc(ioim->bfa, ioim->iotag);
2923        bfa_stats(ioim->itnim, io_iocdowns);
2924        bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
2925}
2926
2927/*
2928 * IO offline TOV popped. Fail the pending IO.
2929 */
2930void
2931bfa_ioim_tov(struct bfa_ioim_s *ioim)
2932{
2933        bfa_trc(ioim->bfa, ioim->iotag);
2934        bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
2935}
2936
2937
2938/*
2939 * Allocate IOIM resource for initiator mode I/O request.
2940 */
2941struct bfa_ioim_s *
2942bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2943                struct bfa_itnim_s *itnim, u16 nsges)
2944{
2945        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
2946        struct bfa_ioim_s *ioim;
2947        struct bfa_iotag_s *iotag = NULL;
2948
2949        /*
2950         * allocate IOIM resource
2951         */
2952        bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
2953        if (!iotag) {
2954                bfa_stats(itnim, no_iotags);
2955                return NULL;
2956        }
2957
2958        ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
2959
2960        ioim->dio = dio;
2961        ioim->itnim = itnim;
2962        ioim->nsges = nsges;
2963        ioim->nsgpgs = 0;
2964
2965        bfa_stats(itnim, total_ios);
2966        fcpim->ios_active++;
2967
2968        list_add_tail(&ioim->qe, &itnim->io_q);
2969
2970        return ioim;
2971}
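
    /*
     * Typical initiator-path usage (illustrative; mirrors the bfad
     * queuecommand path):
     *
     *      ioim = bfa_ioim_alloc(bfa, dio, itnim, sg_cnt);
     *      if (!ioim)
     *              return a busy status so the IO is retried later;
     *      bfa_ioim_start(ioim);
     */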
2972
2973void
2974bfa_ioim_free(struct bfa_ioim_s *ioim)
2975{
2976        struct bfa_fcpim_s *fcpim = ioim->fcpim;
2977        struct bfa_iotag_s *iotag;
2978
2979        if (ioim->nsgpgs > 0)
2980                bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
2981
2982        bfa_stats(ioim->itnim, io_comps);
2983        fcpim->ios_active--;
2984
2985        ioim->iotag &= BFA_IOIM_IOTAG_MASK;
2986
2987        WARN_ON(!(ioim->iotag <
2988                (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
2989        iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
2990
2991        if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
2992                list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
2993        else
2994                list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
2995
2996        list_del(&ioim->qe);
2997}
2998
2999void
3000bfa_ioim_start(struct bfa_ioim_s *ioim)
3001{
3002        bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
3003
3004        /*
3005         * Obtain the queue over which this request has to be issued
3006         */
3007        ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
3008                        BFA_FALSE : bfa_itnim_get_reqq(ioim);
3009
3010        bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
3011}
3012
3013/*
3014 * Driver I/O abort request.
3015 */
3016bfa_status_t
3017bfa_ioim_abort(struct bfa_ioim_s *ioim)
3018{
3020        bfa_trc(ioim->bfa, ioim->iotag);
3021
3022        if (!bfa_ioim_is_abortable(ioim))
3023                return BFA_STATUS_FAILED;
3024
3025        bfa_stats(ioim->itnim, io_aborts);
3026        bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
3027
3028        return BFA_STATUS_OK;
3029}
3030
3031/*
3032 *  BFA TSKIM state machine functions
3033 */
3034
3035/*
3036 * Task management command beginning state.
3037 */
3038static void
3039bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3040{
3041        bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3042
3043        switch (event) {
3044        case BFA_TSKIM_SM_START:
3045                bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3046                bfa_tskim_gather_ios(tskim);
3047
3048                /*
3049                 * If device is offline, do not send TM on wire. Just cleanup
3050                 * any pending IO requests and complete TM request.
3051                 */
3052                if (!bfa_itnim_is_online(tskim->itnim)) {
3053                        bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3054                        tskim->tsk_status = BFI_TSKIM_STS_OK;
3055                        bfa_tskim_cleanup_ios(tskim);
3056                        return;
3057                }
3058
3059                if (!bfa_tskim_send(tskim)) {
3060                        bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
3061                        bfa_stats(tskim->itnim, tm_qwait);
3062                        bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3063                                          &tskim->reqq_wait);
3064                }
3065                break;
3066
3067        default:
3068                bfa_sm_fault(tskim->bfa, event);
3069        }
3070}
3071
3072/*
3073 * TM command is active, awaiting completion from firmware to
3074 * cleanup IO requests in TM scope.
3075 */
3076static void
3077bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3078{
3079        bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3080
3081        switch (event) {
3082        case BFA_TSKIM_SM_DONE:
3083                bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3084                bfa_tskim_cleanup_ios(tskim);
3085                break;
3086
3087        case BFA_TSKIM_SM_CLEANUP:
3088                bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3089                if (!bfa_tskim_send_abort(tskim)) {
3090                        bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
3091                        bfa_stats(tskim->itnim, tm_qwait);
3092                        bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
3093                                &tskim->reqq_wait);
3094                }
3095                break;
3096
3097        case BFA_TSKIM_SM_HWFAIL:
3098                bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3099                bfa_tskim_iocdisable_ios(tskim);
3100                bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3101                break;
3102
3103        default:
3104                bfa_sm_fault(tskim->bfa, event);
3105        }
3106}
3107
3108/*
3109 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3110 * completion event from firmware.
3111 */
3112static void
3113bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3114{
3115        bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3116
3117        switch (event) {
3118        case BFA_TSKIM_SM_DONE:
3119                /*
3120                 * Ignore and wait for ABORT completion from firmware.
3121                 */
3122                break;
3123
3124        case BFA_TSKIM_SM_UTAG:
3125        case BFA_TSKIM_SM_CLEANUP_DONE:
3126                bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3127                bfa_tskim_cleanup_ios(tskim);
3128                break;
3129
3130        case BFA_TSKIM_SM_HWFAIL:
3131                bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3132                bfa_tskim_iocdisable_ios(tskim);
3133                bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3134                break;
3135
3136        default:
3137                bfa_sm_fault(tskim->bfa, event);
3138        }
3139}
3140
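    /*
     * Cleanup of I/Os gathered in the TM scope is in progress; awaiting
     * BFA_TSKIM_SM_IOS_DONE from the waiting counter.
     */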
3141static void
3142bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3143{
3144        bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3145
3146        switch (event) {
3147        case BFA_TSKIM_SM_IOS_DONE:
3148                bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3149                bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
3150                break;
3151
3152        case BFA_TSKIM_SM_CLEANUP:
3153                /*
3154                 * Ignore, TM command completed on wire.
3155         * Notify TM completion on IO cleanup completion.
3156                 */
3157                break;
3158
3159        case BFA_TSKIM_SM_HWFAIL:
3160                bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3161                bfa_tskim_iocdisable_ios(tskim);
3162                bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3163                break;
3164
3165        default:
3166                bfa_sm_fault(tskim->bfa, event);
3167        }
3168}
3169
3170/*
3171 * Task management command is waiting for room in request CQ
3172 */
3173static void
3174bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3175{
3176        bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3177
3178        switch (event) {
3179        case BFA_TSKIM_SM_QRESUME:
3180                bfa_sm_set_state(tskim, bfa_tskim_sm_active);
3181                bfa_tskim_send(tskim);
3182                break;
3183
3184        case BFA_TSKIM_SM_CLEANUP:
3185                /*
3186                 * No need to send TM on wire since ITN is offline.
3187                 */
3188                bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
3189                bfa_reqq_wcancel(&tskim->reqq_wait);
3190                bfa_tskim_cleanup_ios(tskim);
3191                break;
3192
3193        case BFA_TSKIM_SM_HWFAIL:
3194                bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3195                bfa_reqq_wcancel(&tskim->reqq_wait);
3196                bfa_tskim_iocdisable_ios(tskim);
3197                bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3198                break;
3199
3200        default:
3201                bfa_sm_fault(tskim->bfa, event);
3202        }
3203}
3204
3205/*
3206 * Task management command is active, awaiting room in the request CQ
3207 * to send the cleanup request.
3208 */
3209static void
3210bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
3211                enum bfa_tskim_event event)
3212{
3213        bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3214
3215        switch (event) {
3216        case BFA_TSKIM_SM_DONE:
3217                bfa_reqq_wcancel(&tskim->reqq_wait);
3218                /*
3219                 * Fall through to send the abort request.
3220                 */
3221        case BFA_TSKIM_SM_QRESUME:
3222                bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
3223                bfa_tskim_send_abort(tskim);
3224                break;
3225
3226        case BFA_TSKIM_SM_HWFAIL:
3227                bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
3228                bfa_reqq_wcancel(&tskim->reqq_wait);
3229                bfa_tskim_iocdisable_ios(tskim);
3230                bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
3231                break;
3232
3233        default:
3234                bfa_sm_fault(tskim->bfa, event);
3235        }
3236}
3237
3238/*
3239 * BFA callback is pending
3240 */
3241static void
3242bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
3243{
3244        bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
3245
3246        switch (event) {
3247        case BFA_TSKIM_SM_HCB:
3248                bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3249                bfa_tskim_free(tskim);
3250                break;
3251
3252        case BFA_TSKIM_SM_CLEANUP:
3253                bfa_tskim_notify_comp(tskim);
3254                break;
3255
3256        case BFA_TSKIM_SM_HWFAIL:
3257                break;
3258
3259        default:
3260                bfa_sm_fault(tskim->bfa, event);
3261        }
3262}
3263
3264static void
3265__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3266{
3267        struct bfa_tskim_s *tskim = cbarg;
3268
3269        if (!complete) {
3270                bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3271                return;
3272        }
3273
3274        bfa_stats(tskim->itnim, tm_success);
3275        bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3276}
3277
3278static void
3279__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3280{
3281        struct bfa_tskim_s *tskim = cbarg;
3282
3283        if (!complete) {
3284                bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3285                return;
3286        }
3287
3288        bfa_stats(tskim->itnim, tm_failures);
3289        bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3290                                BFI_TSKIM_STS_FAILED);
3291}
3292
3293static bfa_boolean_t
3294bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
3295{
3296        switch (tskim->tm_cmnd) {
3297        case FCP_TM_TARGET_RESET:
3298                return BFA_TRUE;
3299
3300        case FCP_TM_ABORT_TASK_SET:
3301        case FCP_TM_CLEAR_TASK_SET:
3302        case FCP_TM_LUN_RESET:
3303        case FCP_TM_CLEAR_ACA:
3304                return !memcmp(&tskim->lun, &lun, sizeof(lun));
3305
3306        default:
3307                WARN_ON(1);
3308        }
3309
3310        return BFA_FALSE;
3311}
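
    /*
     * For example, an FCP_TM_LUN_RESET scoped to LUN 3 matches only the
     * I/Os addressed to LUN 3, while an FCP_TM_TARGET_RESET matches
     * every I/O on the I-T nexus regardless of LUN.
     */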
3312
3313/*
3314 * Gather affected IO requests and task management commands.
3315 */
3316static void
3317bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3318{
3319        struct bfa_itnim_s *itnim = tskim->itnim;
3320        struct bfa_ioim_s *ioim;
3321        struct list_head *qe, *qen;
3322        struct scsi_cmnd *cmnd;
3323        struct scsi_lun scsilun;
3324
3325        INIT_LIST_HEAD(&tskim->io_q);
3326
3327        /*
3328         * Gather any active IO requests first.
3329         */
3330        list_for_each_safe(qe, qen, &itnim->io_q) {
3331                ioim = (struct bfa_ioim_s *) qe;
3332                cmnd = (struct scsi_cmnd *) ioim->dio;
3333                int_to_scsilun(cmnd->device->lun, &scsilun);
3334                if (bfa_tskim_match_scope(tskim, scsilun)) {
3335                        list_del(&ioim->qe);
3336                        list_add_tail(&ioim->qe, &tskim->io_q);
3337                }
3338        }
3339
3340        /*
3341         * Failback any pending IO requests immediately.
3342         */
3343        list_for_each_safe(qe, qen, &itnim->pending_q) {
3344                ioim = (struct bfa_ioim_s *) qe;
3345                cmnd = (struct scsi_cmnd *) ioim->dio;
3346                int_to_scsilun(cmnd->device->lun, &scsilun);
3347                if (bfa_tskim_match_scope(tskim, scsilun)) {
3348                        list_del(&ioim->qe);
3349                        list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3350                        bfa_ioim_tov(ioim);
3351                }
3352        }
3353}
3354
3355/*
3356 * IO cleanup completion
3357 */
3358static void
3359bfa_tskim_cleanp_comp(void *tskim_cbarg)
3360{
3361        struct bfa_tskim_s *tskim = tskim_cbarg;
3362
3363        bfa_stats(tskim->itnim, tm_io_comps);
3364        bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3365}
3366
3367/*
3368 * Clean up the IO requests gathered in the TM command's scope.
3369 */
3370static void
3371bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3372{
3373        struct bfa_ioim_s *ioim;
3374        struct list_head        *qe, *qen;
3375
3376        bfa_wc_init(&tskim->wc, bfa_tskim_cleanup_comp, tskim);
3377
3378        list_for_each_safe(qe, qen, &tskim->io_q) {
3379                ioim = (struct bfa_ioim_s *) qe;
3380                bfa_wc_up(&tskim->wc);
3381                bfa_ioim_cleanup_tm(ioim, tskim);
3382        }
3383
3384        bfa_wc_wait(&tskim->wc);
3385}
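
    /*
     * Note: bfa_wc_* is a simple waiting-counter pattern. bfa_wc_init()
     * registers bfa_tskim_cleanup_comp() as the resume callback and takes
     * an initial reference, bfa_wc_up() takes one per gathered I/O, and
     * each bfa_ioim_cleanup_tm() completion drops one via
     * bfa_tskim_iodone(). bfa_wc_wait() drops the initial reference, so
     * BFA_TSKIM_SM_IOS_DONE fires immediately when no I/Os were gathered.
     */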
3386
3387/*
3388 * Send task management request to firmware.
3389 */
3390static bfa_boolean_t
3391bfa_tskim_send(struct bfa_tskim_s *tskim)
3392{
3393        struct bfa_itnim_s *itnim = tskim->itnim;
3394        struct bfi_tskim_req_s *m;
3395
3396        /*
3397         * check for room in queue to send request now
3398         */
3399        m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3400        if (!m)
3401                return BFA_FALSE;
3402
3403        /*
3404         * build i/o request message next
3405         */
3406        bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3407                        bfa_fn_lpu(tskim->bfa));
3408
3409        m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
3410        m->itn_fhdl = tskim->itnim->rport->fw_handle;
3411        m->t_secs = tskim->tsecs;
3412        m->lun = tskim->lun;
3413        m->tm_flags = tskim->tm_cmnd;
3414
3415        /*
3416         * queue I/O message to firmware
3417         */
3418        bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3419        return BFA_TRUE;
3420}
3421
3422/*
3423 * Send abort request to cleanup an active TM to firmware.
3424 */
3425static bfa_boolean_t
3426bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3427{
3428        struct bfa_itnim_s      *itnim = tskim->itnim;
3429        struct bfi_tskim_abortreq_s     *m;
3430
3431        /*
3432         * check for room in queue to send request now
3433         */
3434        m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3435        if (!m)
3436                return BFA_FALSE;
3437
3438        /*
3439         * build i/o request message next
3440         */
3441        bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3442                        bfa_fn_lpu(tskim->bfa));
3443
3444        m->tsk_tag  = cpu_to_be16(tskim->tsk_tag);
3445
3446        /*
3447         * queue I/O message to firmware
3448         */
3449        bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
3450        return BFA_TRUE;
3451}
3452
3453/*
3454 * Call to resume task management cmnd waiting for room in request queue.
3455 */
3456static void
3457bfa_tskim_qresume(void *cbarg)
3458{
3459        struct bfa_tskim_s *tskim = cbarg;
3460
3461        bfa_stats(tskim->itnim, tm_qresumes);
3462        bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3463}
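
    /*
     * bfa_tskim_qresume() is registered against tskim->reqq_wait via
     * bfa_reqq_winit() in bfa_tskim_attach(); it runs once the request
     * queue has room again and drives the state machine out of the
     * qfull states.
     */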
3464
3465/*
3466 * Cleanup IOs associated with a task management command on IOC failures.
3467 */
3468static void
3469bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3470{
3471        struct bfa_ioim_s *ioim;
3472        struct list_head        *qe, *qen;
3473
3474        list_for_each_safe(qe, qen, &tskim->io_q) {
3475                ioim = (struct bfa_ioim_s *) qe;
3476                bfa_ioim_iocdisable(ioim);
3477        }
3478}
3479
3480/*
3481 * Notification on completions from related ioim.
3482 */
3483void
3484bfa_tskim_iodone(struct bfa_tskim_s *tskim)
3485{
3486        bfa_wc_down(&tskim->wc);
3487}
3488
3489/*
3490 * Handle IOC h/w failure notification from itnim.
3491 */
3492void
3493bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
3494{
3495        tskim->notify = BFA_FALSE;
3496        bfa_stats(tskim->itnim, tm_iocdowns);
3497        bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
3498}
3499
3500/*
3501 * Cleanup TM command and associated IOs as part of ITNIM offline.
3502 */
3503void
3504bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3505{
3506        tskim->notify = BFA_TRUE;
3507        bfa_stats(tskim->itnim, tm_cleanups);
3508        bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
3509}
3510
3511/*
3512 * Memory allocation and initialization.
3513 */
3514void
3515bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
3516{
3517        struct bfa_tskim_s *tskim;
3518        struct bfa_fcp_mod_s    *fcp = fcpim->fcp;
3519        u16     i;
3520
3521        INIT_LIST_HEAD(&fcpim->tskim_free_q);
3522        INIT_LIST_HEAD(&fcpim->tskim_unused_q);
3523
3524        tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
3525        fcpim->tskim_arr = tskim;
3526
3527        for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3528                /*
3529                 * initialize TSKIM
3530                 */
3531                memset(tskim, 0, sizeof(struct bfa_tskim_s));
3532                tskim->tsk_tag = i;
3533                tskim->bfa      = fcpim->bfa;
3534                tskim->fcpim    = fcpim;
3535                tskim->notify  = BFA_FALSE;
3536                bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3537                                        tskim);
3538                bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3539
3540                list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3541        }
3542
3543        bfa_mem_kva_curp(fcp) = (u8 *) tskim;
3544}
3545
3546void
3547bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3548{
3549        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3550        struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3551        struct bfa_tskim_s *tskim;
3552        u16     tsk_tag = be16_to_cpu(rsp->tsk_tag);
3553
3554        tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
3555        WARN_ON(tskim->tsk_tag != tsk_tag);
3556
3557        tskim->tsk_status = rsp->tsk_status;
3558
3559        /*
3560         * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3561         * requests. All other statuses are for normal completions.
3562         */
3563        if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3564                bfa_stats(tskim->itnim, tm_cleanup_comps);
3565                bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3566        } else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) {
3567                bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG);
3568        } else {
3569                bfa_stats(tskim->itnim, tm_fw_rsps);
3570                bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
3571        }
3572}
3573
3574
3575struct bfa_tskim_s *
3576bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3577{
3578        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3579        struct bfa_tskim_s *tskim;
3580
3581        bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3582
3583        if (tskim)
3584                tskim->dtsk = dtsk;
3585
3586        return tskim;
3587}
3588
3589void
3590bfa_tskim_free(struct bfa_tskim_s *tskim)
3591{
3592        WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
3593        list_del(&tskim->qe);
3594        list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
3595}
3596
3597/*
3598 * Start a task management command.
3599 *
3600 * @param[in]   tskim   BFA task management command instance
3601 * @param[in]   itnim   i-t nexus for the task management command
3602 * @param[in]   lun     lun, if applicable
3603 * @param[in]   tm_cmnd Task management command code.
3604 * @param[in]   t_secs  Timeout in seconds
3605 *
3606 * @return None.
3607 */
3608void
3609bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
3610                        struct scsi_lun lun,
3611                        enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
3612{
3613        tskim->itnim    = itnim;
3614        tskim->lun      = lun;
3615        tskim->tm_cmnd = tm_cmnd;
3616        tskim->tsecs    = tsecs;
3617        tskim->notify  = BFA_FALSE;
3618        bfa_stats(itnim, tm_cmnds);
3619
3620        list_add_tail(&tskim->qe, &itnim->tsk_q);
3621        bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
3622}
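
    /*
     * Usage sketch (hypothetical caller, not part of this driver):
     * issuing a LUN reset through the TSKIM interface. bfad_example_*
     * and the 30-second timeout are illustrative only.
     */
    static inline bfa_status_t
    bfad_example_lun_reset(struct bfa_s *bfa, struct bfa_itnim_s *itnim,
                           struct bfad_tskim_s *dtsk, u64 lun_id)
    {
            struct bfa_tskim_s *tskim;
            struct scsi_lun lun;

            tskim = bfa_tskim_alloc(bfa, dtsk);
            if (!tskim)
                    return BFA_STATUS_FAILED;       /* free list exhausted */

            int_to_scsilun(lun_id, &lun);
            bfa_tskim_start(tskim, itnim, lun, FCP_TM_LUN_RESET, 30);
            return BFA_STATUS_OK;
    }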
3623
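    /*
     * The firmware supports fewer TM requests than were configured;
     * park the excess tskims on the unused queue so they are never
     * allocated.
     */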
3624void
3625bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
3626{
3627        struct bfa_fcpim_s      *fcpim = BFA_FCPIM(bfa);
3628        struct list_head        *qe;
3629        int     i;
3630
3631        for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
3632                bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
3633                list_add_tail(qe, &fcpim->tskim_unused_q);
3634        }
3635}
3636
3637/* BFA FCP module - parent module for fcpim */
3638
3639BFA_MODULE(fcp);
3640
3641static void
3642bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
3643                struct bfa_s *bfa)
3644{
3645        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3646        struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
3647        struct bfa_mem_dma_s *seg_ptr;
3648        u16     nsegs, idx, per_seg_ios, num_io_req;
3649        u32     km_len = 0;
3650
3651        /*
3652         * Zero is an allowed config value for num_ioim_reqs and num_fwtio_reqs.
3653         * If a value is non-zero, clamp it to the supported range.
3654         */
3655        if (cfg->fwcfg.num_ioim_reqs &&
3656            cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
3657                cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
3658        else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
3659                cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
3660
3661        if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
3662                cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3663
3664        num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3665        if (num_io_req > BFA_IO_MAX) {
3666                if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
3667                        cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
3668                        cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
3669                } else if (cfg->fwcfg.num_fwtio_reqs) {
3670                        cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
3671                } else {
3672                        cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
                    }
3673        }
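
            /*
             * Net effect: each count is clamped to its own maximum, and
             * if the combined total still exceeds BFA_IO_MAX the budget
             * is split evenly when both types are in use; otherwise the
             * active type keeps its own maximum.
             */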
3674
3675        bfa_fcpim_meminfo(cfg, &km_len);
3676
3677        num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3678        km_len += num_io_req * sizeof(struct bfa_iotag_s);
3679        km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
3680
3681        /* dma memory */
3682        nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3683        per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
3684
3685        bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3686                if (num_io_req >= per_seg_ios) {
3687                        num_io_req -= per_seg_ios;
3688                        bfa_mem_dma_setup(minfo, seg_ptr,
3689                                per_seg_ios * BFI_IOIM_SNSLEN);
3690                } else {
3691                        bfa_mem_dma_setup(minfo, seg_ptr,
3692                                num_io_req * BFI_IOIM_SNSLEN);
                    }
3693        }
3694
3695        /* kva memory */
3696        bfa_mem_kva_setup(minfo, fcp_kva, km_len);
3697}
3698
3699static void
3700bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3701                struct bfa_pcidev_s *pcidev)
3702{
3703        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3704        struct bfa_mem_dma_s *seg_ptr;
3705        u16     idx, nsegs, num_io_req;
3706
3707        fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3708        fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
3709        fcp->num_fwtio_reqs  = cfg->fwcfg.num_fwtio_reqs;
3710        fcp->num_itns   = cfg->fwcfg.num_rports;
3711        fcp->bfa = bfa;
3712
3713        /*
3714         * Set up the pool of snsbase addresses that is passed to the fw
3715         * as part of bfi_iocfc_cfg_s.
3716         */
3717        num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
3718        nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
3719
3720        bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
3721
3722                if (!bfa_mem_dma_virt(seg_ptr))
3723                        break;
3724
3725                fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
3726                fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
3727                bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
3728        }
3729
3730        fcp->throttle_update_required = 1;
3731        bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
3732
3733        bfa_iotag_attach(fcp);
3734
3735        fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
3736        bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
3737                        (fcp->num_itns * sizeof(struct bfa_itn_s));
3738        memset(fcp->itn_arr, 0,
3739                        (fcp->num_itns * sizeof(struct bfa_itn_s)));
3740}
3741
3742static void
3743bfa_fcp_detach(struct bfa_s *bfa)
3744{
3745}
3746
3747static void
3748bfa_fcp_start(struct bfa_s *bfa)
3749{
3750        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3751
3752        /*
3753         * bfa_init() with flash read is complete. Now invalidate the stale
3754         * contents of the LUN mask, such as unit attention, rp tag and lp tag.
3755         */
3756        bfa_ioim_lm_init(fcp->bfa);
3757}
3758
3759static void
3760bfa_fcp_stop(struct bfa_s *bfa)
3761{
3762}
3763
3764static void
3765bfa_fcp_iocdisable(struct bfa_s *bfa)
3766{
3767        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3768
3769        bfa_fcpim_iocdisable(fcp);
3770}
3771
3772void
3773bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw)
3774{
3775        struct bfa_fcp_mod_s    *mod = BFA_FCP_MOD(bfa);
3776        struct list_head        *qe;
3777        int     i;
3778
3779        /* Update io throttle value only once during driver load time */
3780        if (!mod->throttle_update_required)
3781                return;
3782
3783        for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
3784                bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
3785                list_add_tail(qe, &mod->iotag_unused_q);
3786        }
3787
3788        if (mod->num_ioim_reqs != num_ioim_fw) {
3789                bfa_trc(bfa, mod->num_ioim_reqs);
3790                bfa_trc(bfa, num_ioim_fw);
3791        }
3792
3793        mod->max_ioim_reqs = max_ioim_fw;
3794        mod->num_ioim_reqs = num_ioim_fw;
3795        mod->throttle_update_required = 0;
3796}
3797
3798void
3799bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
3800                void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
3801{
3802        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3803        struct bfa_itn_s *itn;
3804
3805        itn =  BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
3806        itn->isr = isr;
3807}
3808
3809/*
3810 * Itn interrupt processing.
3811 */
3812void
3813bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3814{
3815        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3816        union bfi_itn_i2h_msg_u msg;
3817        struct bfa_itn_s *itn;
3818
3819        msg.msg = m;
3820        itn =  BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
3821
3822        if (itn->isr)
3823                itn->isr(bfa, m);
3824        else
3825                WARN_ON(1);
3826}
3827
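    /*
     * Carve the iotag array out of the module KVA area and split the
     * tag space: tags below num_ioim_reqs serve initiator I/Os and the
     * rest serve target (tio) I/Os, mirroring the split bfa_ioim_free()
     * uses when returning a tag to its free list.
     */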
3828void
3829bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
3830{
3831        struct bfa_iotag_s *iotag;
3832        u16     num_io_req, i;
3833
3834        iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
3835        fcp->iotag_arr = iotag;
3836
3837        INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
3838        INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
3839        INIT_LIST_HEAD(&fcp->iotag_unused_q);
3840
3841        num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
3842        for (i = 0; i < num_io_req; i++, iotag++) {
3843                memset(iotag, 0, sizeof(struct bfa_iotag_s));
3844                iotag->tag = i;
3845                if (i < fcp->num_ioim_reqs)
3846                        list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
3847                else
3848                        list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
3849        }
3850
3851        bfa_mem_kva_curp(fcp) = (u8 *) iotag;
3852}
3853
3854
3855/**
3856 * To send the config request, first try to use the throttle value from
3857 * flash; if it is 0, fall back to the driver parameter. The result must
3858 * be min(flash_val, drv_val), because memory allocation was done based
3859 * on the driver-configured value.
3860 */
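    /*
     * For example, with drv_cfg_param = 256: a flash value of 512 is
     * capped to 256, a flash value of 128 is used as-is, and a flash
     * value of 0 (or unreadable dconf data) falls back to 256.
     */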
3861u16
3862bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param)
3863{
3864        u16 tmp;
3865        struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3866
3867        /*
3868         * If a throttle value from flash is already in effect after the
3869         * driver is loaded, always return the current value (rather than
3870         * the raw flash value) until the next load.
3871         */
3872        if (!fcp->throttle_update_required)
3873                return (u16)fcp->num_ioim_reqs;
3874
3875        tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0;
3876        if (!tmp || (tmp > drv_cfg_param))
3877                tmp = drv_cfg_param;
3878
3879        return tmp;
3880}
3881
3882bfa_status_t
3883bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value)
3884{
3885        if (!bfa_dconf_get_min_cfg(bfa)) {
3886                BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value;
3887                BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1;
3888                return BFA_STATUS_OK;
3889        }
3890
3891        return BFA_STATUS_FAILED;
3892}
3893
3894u16
3895bfa_fcpim_read_throttle(struct bfa_s *bfa)
3896{
3897        struct bfa_throttle_cfg_s *throttle_cfg =
3898                        &(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg);
3899
3900        return (!bfa_dconf_get_min_cfg(bfa) && (throttle_cfg->is_valid == 1)) ?
3901                        throttle_cfg->value : 0;
3902}
3903
3904bfa_status_t
3905bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value)
3906{
3907        /* In min cfg, no commands should run. */
3908        if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
3909            (!bfa_dconf_read_data_valid(bfa)))
3910                return BFA_STATUS_FAILED;
3911
3912        bfa_fcpim_write_throttle(bfa, value);
3913
3914        return bfa_dconf_update(bfa);
3915}
3916
3917bfa_status_t
3918bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf)
3919{
3920        struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3921        struct bfa_defs_fcpim_throttle_s throttle;
3922
3923        if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
3924            (!bfa_dconf_read_data_valid(bfa)))
3925                return BFA_STATUS_FAILED;
3926
3927        memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s));
3928
3929        throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs);
3930        throttle.cfg_value = bfa_fcpim_read_throttle(bfa);
3931        if (!throttle.cfg_value)
3932                throttle.cfg_value = throttle.cur_value;
3933        throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs);
3934        memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s));
3935
3936        return BFA_STATUS_OK;
3937}
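
    /*
     * Usage sketch (hypothetical management-path caller, not part of
     * this driver): read the current throttle limits, then persist a
     * new value. bfad_example_* is illustrative only.
     */
    static inline bfa_status_t
    bfad_example_set_throttle(struct bfa_s *bfa, u16 new_value)
    {
            struct bfa_defs_fcpim_throttle_s cur;

            if (bfa_fcpim_throttle_get(bfa, &cur) != BFA_STATUS_OK)
                    return BFA_STATUS_FAILED;

            /* memory was sized for max_value; never exceed it */
            if (new_value > cur.max_value)
                    new_value = cur.max_value;

            return bfa_fcpim_throttle_set(bfa, new_value);
    }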
3938