linux/drivers/scsi/qla2xxx/qla_inline.h
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HBA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        return iocbs;
}
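
/*
 * Illustrative arithmetic (not a driver code path): the command IOCB
 * holds one data segment descriptor and each Continuation Type 1 IOCB
 * holds up to five more, so the result is 1 + DIV_ROUND_UP(dsds - 1, 5).
 * For example:
 *
 *      dsds = 1  -> 1 IOCB
 *      dsds = 6  -> 1 + 5/5      = 2 IOCBs
 *      dsds = 7  -> 1 + 6/5 + 1  = 3 IOCBs
 *      dsds = 12 -> 1 + 11/5 + 1 = 4 IOCBs
 */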

/*
 * qla2x00_debounce_register
 *      Debounce a register read.
 *
 * Input:
 *      addr = register address.
 *
 * Returns:
 *      register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile uint16_t __iomem *addr)
{
        volatile uint16_t first;
        volatile uint16_t second;

        do {
                first = RD_REG_WORD(addr);
                barrier();
                cpu_relax();
                second = RD_REG_WORD(addr);
        } while (first != second);

        return (first);
}
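
/*
 * Example usage (illustrative sketch; "reg" stands for a mapped ISP
 * register block and "istatus" is only a stand-in register name):
 *
 *      uint16_t stat = qla2x00_debounce_register(&reg->istatus);
 *
 * The register is read repeatedly until two consecutive reads return the
 * same value, so a transient value seen while the ISP is updating the
 * register is never returned.
 */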

static inline void
qla2x00_poll(struct rsp_que *rsp)
{
        unsigned long flags;
        struct qla_hw_data *ha = rsp->hw;

        local_irq_save(flags);
        if (IS_P3P_TYPE(ha))
                qla82xx_poll(0, rsp);
        else
                ha->isp_ops->intr_handler(0, rsp);
        local_irq_restore(flags);
}

static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
        uint32_t *ifcp = (uint32_t *) fcp;
        uint32_t *ofcp = (uint32_t *) fcp;
        uint32_t iter = bsize >> 2;

        for (; iter ; iter--)
                *ofcp++ = swab32(*ifcp++);

        return fcp;
}
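
/*
 * Descriptive note: the swap is done in place, 32 bits at a time, so the
 * buffer bytes {b0, b1, b2, b3, b4, b5, b6, b7, ...} become
 * {b3, b2, b1, b0, b7, b6, b5, b4, ...} regardless of host endianness;
 * bsize is assumed to be a multiple of four (any trailing bytes are left
 * untouched).
 */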

static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
        uint32_t *isrc = (uint32_t *) src;
        __le32 *odest = (__le32 *) dst;
        uint32_t iter = bsize >> 2;

        for (; iter ; iter--)
                *odest++ = cpu_to_le32(*isrc++);
}
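
/*
 * Descriptive note: copies bsize bytes from src to dst while converting
 * each 32-bit word from CPU byte order to the little-endian layout the
 * adapter expects; on a little-endian host cpu_to_le32() is a no-op and
 * this reduces to a plain word-by-word copy.
 */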

static inline void
qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
{
        int i;

        if (IS_FWI2_CAPABLE(ha))
                return;

        for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
                set_bit(i, ha->loop_id_map);
        set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
        set_bit(BROADCAST, ha->loop_id_map);
}
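
/*
 * Descriptive note: on pre-FWI2 ISPs this pre-marks loop IDs
 * 0..SNS_FIRST_LOOP_ID-1 plus MANAGEMENT_SERVER and BROADCAST as in use
 * in loop_id_map so they are never handed out to remote ports.
 * FWI2-capable ISPs return early; their reserved handles are instead
 * recognized by qla2x00_is_reserved_id() below (anything above
 * NPH_LAST_HANDLE).
 */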

static inline int
qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
{
        struct qla_hw_data *ha = vha->hw;

        if (IS_FWI2_CAPABLE(ha))
                return (loop_id > NPH_LAST_HANDLE);

        return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
            loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
}

static inline void
qla2x00_clear_loop_id(fc_port_t *fcport)
{
        struct qla_hw_data *ha = fcport->vha->hw;

        if (fcport->loop_id == FC_NO_LOOP_ID ||
            qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
                return;

        clear_bit(fcport->loop_id, ha->loop_id_map);
        fcport->loop_id = FC_NO_LOOP_ID;
}

static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
{
        struct dsd_dma *dsd_ptr, *tdsd_ptr;
        struct crc_context *ctx;

        ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);

        /* clean up allocated prev pool */
        list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
            &ctx->dsd_list, list) {
                dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
                    dsd_ptr->dsd_list_dma);
                list_del(&dsd_ptr->list);
                kfree(dsd_ptr);
        }
        INIT_LIST_HEAD(&ctx->dsd_list);
}

static inline void
qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
        int old_state;

        old_state = atomic_read(&fcport->state);
        atomic_set(&fcport->state, state);

        /* Don't print state transitions during initial allocation of fcport */
        if (old_state && old_state != state) {
                ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
                    "FCPort state transitioned from %s to %s - "
                    "portid=%02x%02x%02x.\n",
                    port_state_str[old_state], port_state_str[state],
                    fcport->d_id.b.domain, fcport->d_id.b.area,
                    fcport->d_id.b.al_pa);
        }
}

static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
        /*
         * Uncomment when corresponding SCSI changes are done.
         *
        if (!sp->cmd->prot_chk)
                return 0;
         *
         */
        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
                if (ql2xenablehba_err_chk >= 1)
                        return 1;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (ql2xenablehba_err_chk >= 2)
                        return 1;
                break;
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
                return 1;
        }
        return 0;
}
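
/*
 * Summary of the ql2xenablehba_err_chk module parameter as used above
 * (descriptive, based only on the switch statement):
 *
 *      >= 1: HBA error checking enabled for READ_STRIP / WRITE_INSERT
 *      >= 2: also enabled for READ_PASS / WRITE_PASS
 *      READ_INSERT / WRITE_STRIP: always enabled, independent of the
 *      module parameter
 */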

static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
        scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

        /* Test appropriate base-vha and vha flags. */
        return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
            test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
            test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
            test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
            test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}

static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
        srb_t *sp = NULL;
        struct qla_hw_data *ha = vha->hw;
        uint8_t bail;

        QLA_VHA_MARK_BUSY(vha, bail);
        if (unlikely(bail))
                return NULL;

        sp = mempool_alloc(ha->srb_mempool, flag);
        if (!sp)
                goto done;

        memset(sp, 0, sizeof(*sp));
        sp->fcport = fcport;
        sp->iocbs = 1;
done:
        if (!sp)
                QLA_VHA_MARK_NOT_BUSY(vha);
        return sp;
}

static inline void
qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
{
        mempool_free(sp, vha->hw->srb_mempool);
        QLA_VHA_MARK_NOT_BUSY(vha);
}
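
/*
 * Typical pairing of the two helpers above (illustrative sketch only;
 * "setup_failed" is a stand-in for a caller's error path):
 *
 *      sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 *      if (!sp)
 *              return -ENOMEM;
 *      ... fill in sp->type, sp->name, timer and done/free callbacks ...
 *      if (setup_failed)
 *              qla2x00_rel_sp(vha, sp);
 *
 * qla2x00_get_sp() marks the host busy before allocating, so every srb it
 * returns must eventually be released through a path that also clears the
 * busy count, as qla2x00_rel_sp() does.
 */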

static inline void
qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
        init_timer(&sp->u.iocb_cmd.timer);
        sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
        sp->u.iocb_cmd.timer.data = (unsigned long)sp;
        sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
        add_timer(&sp->u.iocb_cmd.timer);
        sp->free = qla2x00_sp_free;
        if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
            (sp->type == SRB_FXIOCB_DCMD))
                init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
}
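
/*
 * Descriptive note: tmo is a timeout in seconds (converted to jiffies with
 * HZ above); when it expires, qla2x00_sp_timeout() runs with the srb
 * pointer as its argument.  The FX00-specific completion is only prepared
 * for SRB_FXIOCB_DCMD requests on ISPFX00 adapters.
 */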

static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
        if (IS_QLAFX00(ha))
                return sizeof(uint32_t) * 32;
        else
                return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}

static inline void
qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
{
        if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth)
                return;

        /* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */
        if (time_before(jiffies, (vha->hw->host_last_rampdown_time +
            HOST_QUEUE_RAMPDOWN_INTERVAL)))
                return;

        /* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */
        if (time_before(jiffies, (vha->hw->host_last_rampup_time +
            HOST_QUEUE_RAMPUP_INTERVAL)))
                return;

        set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
}

static inline void
qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
{
        if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
            (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
                set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
                clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
                complete(&ha->mbx_intr_comp);
        }
}
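
/*
 * Descriptive note: when a mailbox command is sleeping on MBX_INTR_WAIT
 * and the interrupt status reports MBX_INTERRUPT with mailbox interrupts
 * enabled, this records the interrupt, clears the wait flag and completes
 * mbx_intr_comp so the waiting issuer wakes up.
 */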